/**
* Channel handler that responds to channelInactive event and reconnects the session.
- * Only if the initial connection was successfully established and promise was not canceled.
+ * Only if the promise was not canceled.
*/
private static final class ClosedChannelHandler extends ChannelInboundHandlerAdapter {
private final ReconnectPromise<?, ?> promise;
assertFalse(session.isSuccess());
}
+ @Test
+ public void testNegotiationFailedReconnect() throws Exception {
+ final Promise<Boolean> p = new DefaultPromise<>(GlobalEventExecutor.INSTANCE);
+
+ this.dispatcher = getServerDispatcher(p);
+
+ this.server = this.dispatcher.createServer(this.serverAddress, new SessionListenerFactory<SimpleSessionListener>() {
+ @Override
+ public SimpleSessionListener getSessionListener() {
+ return new SimpleSessionListener();
+ }
+ });
+
+ this.server.get();
+
+ this.clientDispatcher = new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
+ @Override
+ public SessionNegotiator<SimpleSession> getSessionNegotiator(final SessionListenerFactory<SimpleSessionListener> factory,
+ final Channel channel, final Promise<SimpleSession> promise) {
+
+ return new SimpleSessionNegotiator(promise, channel) {
+ @Override
+ protected void startNegotiation() throws Exception {
+ negotiationFailed(new IllegalStateException("Negotiation failed"));
+ }
+ };
+ }
+ }, new DefaultPromise<SimpleSession>(GlobalEventExecutor.INSTANCE), eventLoopGroup);
+
+ final ReconnectStrategyFactory reconnectStrategyFactory = mock(ReconnectStrategyFactory.class);
+ final ReconnectStrategy reconnectStrategy = getMockedReconnectStrategy();
+ doReturn(reconnectStrategy).when(reconnectStrategyFactory).createReconnectStrategy();
+
+ this.clientDispatcher.createReconnectingClient(this.serverAddress,
+ reconnectStrategyFactory, new SessionListenerFactory<SimpleSessionListener>() {
+ @Override
+ public SimpleSessionListener getSessionListener() {
+ return new SimpleSessionListener();
+ }
+ });
+
+
+ // Reconnect strategy should be consulted at least twice, for initial connect and reconnect attempts after drop
+ verify(reconnectStrategyFactory, timeout((int) TimeUnit.MINUTES.toMillis(3)).atLeast(2)).createReconnectStrategy();
+ }
+
private SimpleDispatcher getClientDispatcher() {
return new SimpleDispatcher(new SessionNegotiatorFactory<SimpleMessage, SimpleSession, SimpleSessionListener>() {
@Override
@Before
public void setUp() throws Exception {
file = Files.createTempFile("testFilePersist", ".txt").toFile();
+ file.deleteOnExit();
if (!file.exists()) {
return;
}
@Test
public void testNoLastConfig() throws Exception {
File file = Files.createTempFile("testFilePersist", ".txt").toFile();
+ file.deleteOnExit();
if (!file.exists()) {
return;
}
<ignore/>
</action>
</pluginExecution>
+ <pluginExecution>
+ <pluginExecutionFilter>
+ <groupId>org.opendaylight.yangtools</groupId>
+ <artifactId>yang-maven-plugin</artifactId>
+ <versionRange>[0.5,)</versionRange>
+ <goals>
+ <goal>generate-sources</goal>
+ </goals>
+ </pluginExecutionFilter>
+ <action>
+ <execute></execute>
+ </action>
+ </pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</modules>
</profile>
</profiles>
-</project>
\ No newline at end of file
+</project>
import org.osgi.framework.BundleContext;
+@Deprecated
public abstract class AbstractBindingAwareConsumer extends AbstractBrokerAwareActivator implements BindingAwareConsumer {
@Override
import org.opendaylight.yangtools.yang.binding.RpcService;
import org.osgi.framework.BundleContext;
+@Deprecated
public abstract class AbstractBindingAwareProvider extends AbstractBrokerAwareActivator implements BindingAwareProvider {
@Override
}
public List<?> execute(Object o) {
- List<Object> result = new LinkedList<>();
if (o == null) {
return null;
}
- if (Set.class.isAssignableFrom(o.getClass())) {
- Set<?> lst = (Set<?>) o;
- for (Object oo : lst) {
+ List<Object> result = new LinkedList<>();
+ if (o instanceof Set) {
+ for (Object oo : (Set<?>) o) {
addToResult(result, execute(oo));
}
- return result;
- } else if (List.class.isAssignableFrom(o.getClass())) {
- List<?> lst = (List<?>) o;
- for (Object oo : lst) {
+ } else if (o instanceof List) {
+ for (Object oo : (List<?>) o) {
addToResult(result, execute(oo));
}
- return result;
- } else if (Map.class.isAssignableFrom(o.getClass())) {
- Map<?, ?> map = (Map<?, ?>) o;
- for (Object oo : map.values()) {
+ } else if (o instanceof Map) {
+ for (Object oo : ((Map<?, ?>) o).values()) {
addToResult(result, execute(oo));
}
- return result;
+ } else {
+ addToResult(result, XSQLCriteria.getValue(o, this.property));
}
-
- addToResult(result, XSQLCriteria.getValue(o, this.property));
-
return result;
}
private static void addToResult(List<Object> result, Object o) {
- if (o == null) {
- return;
- }
- if (Set.class.isAssignableFrom(o.getClass())) {
- Set<?> lst = (Set<?>) o;
- for (Object oo : lst) {
- result.add(oo);
- }
- } else if (List.class.isAssignableFrom(o.getClass())) {
- List<?> lst = (List<?>) o;
- for (Object oo : lst) {
- result.add(oo);
- }
- } else if (Map.class.isAssignableFrom(o.getClass())) {
- Map<?, ?> map = (Map<?, ?>) o;
- for (Object oo : map.values()) {
- result.add(oo);
- }
- } else {
+ if (o instanceof Set) {
+ result.addAll((Set<?>)o);
+ } else if (o instanceof List) {
+ result.addAll((List<?>)o);
+ } else if (o instanceof Map) {
+ result.addAll(((Map<?, ?>)o).values());
+ } else if (o != null) {
result.add(o);
}
}
-
}
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;
import static org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants.RPC_REPLY_KEY;
import static org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0;
import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.channel.ChannelFuture;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.GlobalEventExecutor;
import java.io.ByteArrayInputStream;
+import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.UUID;
import org.mockito.MockitoAnnotations;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
import org.opendaylight.controller.sal.connect.api.RemoteDevice;
import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+import org.opendaylight.protocol.framework.TimedReconnectStrategy;
import org.opendaylight.yangtools.yang.common.QName;
import org.opendaylight.yangtools.yang.common.RpcError;
import org.opendaylight.yangtools.yang.common.RpcResult;
errorInfo.contains( "<bad-element>bar</bad-element>" ) );
}
+ /**
+ * Test whether reconnect is scheduled properly
+ */
+ @Test
+ public void testNetconfDeviceReconnectInCommunicator() throws Exception {
+ final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> device = mock(RemoteDevice.class);
+
+ final TimedReconnectStrategy timedReconnectStrategy = new TimedReconnectStrategy(GlobalEventExecutor.INSTANCE, 10000, 0, 1.0, null, 100L, null);
+ final ReconnectStrategy reconnectStrategy = spy(new ReconnectStrategy() {
+ @Override
+ public int getConnectTimeout() throws Exception {
+ return timedReconnectStrategy.getConnectTimeout();
+ }
+
+ @Override
+ public Future<Void> scheduleReconnect(final Throwable cause) {
+ return timedReconnectStrategy.scheduleReconnect(cause);
+ }
+
+ @Override
+ public void reconnectSuccessful() {
+ timedReconnectStrategy.reconnectSuccessful();
+ }
+ });
+
+ final NetconfDeviceCommunicator listener = new NetconfDeviceCommunicator(new RemoteDeviceId("test"), device);
+ final EventLoopGroup group = new NioEventLoopGroup();
+ final Timer time = new HashedWheelTimer();
+ try {
+ final NetconfClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create()
+ .withAddress(new InetSocketAddress("localhost", 65000))
+ .withReconnectStrategy(reconnectStrategy)
+ .withConnectStrategyFactory(new ReconnectStrategyFactory() {
+ @Override
+ public ReconnectStrategy createReconnectStrategy() {
+ return reconnectStrategy;
+ }
+ })
+ .withAuthHandler(new LoginPassword("admin", "admin"))
+ .withConnectionTimeoutMillis(10000)
+ .withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH)
+ .withSessionListener(listener)
+ .build();
+
+
+ listener.initializeRemoteConnection(new NetconfClientDispatcherImpl(group, group, time), cfg);
+
+ verify(reconnectStrategy, timeout((int) TimeUnit.MINUTES.toMillis(3)).times(101)).scheduleReconnect(any(Throwable.class));
+ } finally {
+ time.stop();
+ group.shutdownGracefully();
+ }
+ }
+
@Test
public void testOnResponseMessageWithWrongMessageID() throws Exception {
setupSession();
<dependency>
<groupId>org.opendaylight.controller.samples</groupId>
<artifactId>clustering-it-model</artifactId>
- <version>${version}</version>
+ <version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.opendaylight.controller</groupId>
/**
* All disconnected Nodes need be removed from stat list Nodes
+ *
* @param flowNode
* @return true/false if the {@link Node} removed successful
*/
boolean disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+ /**
+ * Adds a new feature {@link StatCapabTypes} to the Node identified by
+ * nodeIdent (an InstanceIdentifier<Node>).
+ *
+ * @param nodeIdent identifier of the Node to extend
+ * @param statCapab the capability type to register
+ * @return true if the {@link StatCapabTypes} was added successfully, false otherwise
+ */
+ boolean registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
/**
* Method return true only and only if {@link StatPermCollector} contain
* valid node registration in its internal {@link Node} map.
* Otherwise return false.
*
- * @param InstanceIdentifier<FlowCapableNode> flowNode
+ * @param flowNode
* @return
*/
boolean isProvidedFlowNodeActive(InstanceIdentifier<Node> nodeIdent);
*/
void disconnectedNodeUnregistration(InstanceIdentifier<Node> nodeIdent);
+ /**
+ * Wraps {@link StatPermCollector}#registerAdditionalNodeFeature to allow
+ * registering an additional Node feature {@link StatCapabTypes} for
+ * statistics collection.
+ *
+ * @param nodeIdent
+ * @param statCapab
+ */
+ void registerAdditionalNodeFeature(InstanceIdentifier<Node> nodeIdent, StatCapabTypes statCapab);
+
/**
* Method provides access to Device RPC methods by wrapped
* internal method. In next {@link StatRpcMsgManager} is registered all
private ListenerRegistration<DataChangeListener> listenerRegistration;
protected final Map<InstanceIdentifier<Node>, Map<InstanceIdentifier<T>, Integer>> mapNodesForDelete = new ConcurrentHashMap<>();
+ protected final Map<InstanceIdentifier<Node>, Integer> mapNodeFeautureRepeater = new ConcurrentHashMap<>();
private final Class<T> clazz;
super.close();
}
+ /**
+ * Returns the current DataObject identified by the given InstanceIdentifier, read from the Config/DS.
+ * @param path path to the requested DataObject
+ * @return an Optional containing the DataObject, if present
+ */
protected final <K extends DataObject> Optional<K> readLatestConfiguration(final InstanceIdentifier<K> path) {
if(currentReadTx == null) {
currentReadTx = dataBroker.newReadOnlyTransaction();
return txContainer;
}
+ /**
+ * Validates a TransactionCacheContainer. It must be called before any txCacheContainer processing.
+ *
+ * @param txCacheContainer
+ * @return
+ */
+ protected boolean isTransactionCacheContainerValid(final Optional<TransactionCacheContainer<?>> txCacheContainer) {
+ if ( ! txCacheContainer.isPresent()) {
+ LOG.debug("Transaction Cache Container is not presented!");
+ return false;
+ }
+ if (txCacheContainer.get().getNodeId() == null) {
+ LOG.debug("Transaction Cache Container {} don't have Node ID!", txCacheContainer.get().getId());
+ return false;
+ }
+ if (txCacheContainer.get().getNotifications() == null) {
+ LOG.debug("Transaction Cache Container {} for {} node don't have Notifications!",
+ txCacheContainer.get().getId(), txCacheContainer.get().getNodeId());
+ return false;
+ }
+ return true;
+ }
+
/**
* Wrapping Future object call to {@link org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager}
* isExpectedStatistics with 10sec TimeOut.
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupDescStatsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.NodeGroupStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.OpendaylightGroupStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.desc.GroupDescBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.group.statistics.rev131111.group.features.GroupFeatures;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
- LOG.debug("STAT-MANAGER - GroupDescStatsUpdated: unregistred notification detect TransactionId {}", transId);
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<GroupDescStats> groupStats = notification.getGroupDescStats() != null
- ? new ArrayList<>(notification.getGroupDescStats()) : new ArrayList<GroupDescStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof GroupDescStatsUpdated) {
- groupStats.addAll(((GroupDescStatsUpdated) notif).getGroupDescStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- statGroupDescCommit(groupStats, nodeIdent, tx);
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Validate that the FlowCapableNode exists */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ /* Prepare the list of existing Groups; Groups that were not updated will be removed */
+ final List<Group> existGroups = fNode.get().getGroup() != null
+ ? fNode.get().getGroup() : Collections.<Group> emptyList();
+ final List<GroupKey> existGroupKeys = new ArrayList<>();
+ for (final Group group : existGroups) {
+ existGroupKeys.add(group.getKey());
+ }
+ /* GroupDesc processing */
+ statGroupDescCommit(txContainer, tx, fNodeIdent, existGroupKeys);
+ /* Delete all Group nodes that are no longer present */
+ deleteAllNotPresentNode(fNodeIdent, tx, Collections.unmodifiableList(existGroupKeys));
/* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
}
@Override
public void onGroupFeaturesUpdated(final GroupFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
- LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
+ LOG.debug("Unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
- return;
- }
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if ( ! txContainer.isPresent()) {
return;
}
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- notifyToCollectNextStatistics(nodeIdent);
- final GroupFeatures stats = new GroupFeaturesBuilder(notification).build();
- final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeIdent
- .augmentation(NodeGroupFeatures.class).child(GroupFeatures.class);
- Optional<Node> node = Optional.absent();
- try {
- node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
}
- if (node.isPresent()) {
- tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupFeaturesUpdated)) {
+ break;
+ }
+ final GroupFeatures stats = new GroupFeaturesBuilder((GroupFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeGroupFeatures> nodeGroupFeatureIdent =
+ nodeIdent.augmentation(NodeGroupFeatures.class);
+ final InstanceIdentifier<GroupFeatures> groupFeatureIdent = nodeGroupFeatureIdent
+ .child(GroupFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeGroupFeatureIdent, new NodeGroupFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.GROUP_STATS);
+ }
}
}
});
@Override
public void onGroupStatisticsUpdated(final GroupStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - GroupStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<GroupStats> groupStats = notification.getGroupStats() != null
- ? new ArrayList<>(notification.getGroupStats()) : new ArrayList<GroupStats>(10);
- Optional<Group> notifGroup = Optional.absent();
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
- if (inputObj.isPresent() && inputObj.get() instanceof Group) {
- notifGroup = Optional.<Group> of((Group)inputObj.get());
- }
- final List<? extends TransactionAware> cacheNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof GroupStatisticsUpdated) {
- groupStats.addAll(((GroupStatisticsUpdated) notif).getGroupStats());
- }
- }
- }
- final Optional<Group> group = notifGroup;
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- /* Notification for continue collecting statistics */
- if ( ! group.isPresent()) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+ /* Node exist check */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ Optional<Group> notifGroup = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Group) {
+ notifGroup = Optional.<Group> of((Group)inputObj.get());
+ }
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupStatisticsUpdated)) {
+ break;
+ }
+ statGroupCommit(((GroupStatisticsUpdated) notif).getGroupStats(), nodeIdent, tx);
+ }
+ if (notifGroup.isPresent()) {
notifyToCollectNextStatistics(nodeIdent);
}
- statGroupCommit(groupStats, nodeIdent, group, tx);
}
});
}
private void statGroupCommit(final List<GroupStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
- final Optional<Group> group, final ReadWriteTransaction trans) {
+ final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(groupStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
- for (final GroupStats groupStat : groupStats) {
- final GroupStatistics stats = new GroupStatisticsBuilder(groupStat).build();
+ for (final GroupStats gStat : groupStats) {
+ final GroupStatistics stats = new GroupStatisticsBuilder(gStat).build();
- final GroupKey groupKey = new GroupKey(groupStat.getGroupId());
- final InstanceIdentifier<GroupStatistics> gsIdent = fNodeIdent
- .child(Group.class,groupKey).augmentation(NodeGroupStatistics.class)
- .child(GroupStatistics.class);
+ final InstanceIdentifier<Group> groupIdent = fNodeIdent.child(Group.class, new GroupKey(gStat.getGroupId()));
+ final InstanceIdentifier<NodeGroupStatistics> nGroupStatIdent =groupIdent
+ .augmentation(NodeGroupStatistics.class);
+ final InstanceIdentifier<GroupStatistics> gsIdent = nGroupStatIdent.child(GroupStatistics.class);
/* Statistics Writing */
- Optional<FlowCapableNode> fNode = Optional.absent();
+ Optional<Group> group = Optional.absent();
try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ group = tx.read(LogicalDatastoreType.OPERATIONAL, groupIdent).checkedGet();
}
catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ LOG.debug("Read Operational/DS for Group node fail! {}", groupIdent, e);
}
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
+ if (group.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nGroupStatIdent, new NodeGroupStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, gsIdent, stats);
}
}
}
- private void statGroupDescCommit(final List<GroupDescStats> groupStats, final InstanceIdentifier<Node> nodeIdent,
- final ReadWriteTransaction trans) {
- final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ private void statGroupDescCommit(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<GroupKey> existGroupKeys) {
- final List<GroupKey> deviceGroupKeys = new ArrayList<>();
+ Preconditions.checkNotNull(existGroupKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
- for (final GroupDescStats group : groupStats) {
- if (group.getGroupId() != null) {
- final GroupBuilder groupBuilder = new GroupBuilder(group);
- final GroupKey groupKey = new GroupKey(group.getGroupId());
- final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof GroupDescStatsUpdated)) {
+ break;
+ }
+ final List<GroupDescStats> groupStats = ((GroupDescStatsUpdated) notif).getGroupDescStats();
+ if (groupStats == null) {
+ break;
+ }
+ for (final GroupDescStats group : groupStats) {
+ if (group.getGroupId() != null) {
+ final GroupBuilder groupBuilder = new GroupBuilder(group);
+ final GroupKey groupKey = new GroupKey(group.getGroupId());
+ final InstanceIdentifier<Group> groupRef = fNodeIdent.child(Group.class,groupKey);
- final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
- groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
- //Update augmented data
- groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
- deviceGroupKeys.add(groupKey);
- Optional<FlowCapableNode> hashIdUpd = Optional.absent();
- try {
- hashIdUpd = trans.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
- }
- if (hashIdUpd.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
+ final NodeGroupDescStatsBuilder groupDesc= new NodeGroupDescStatsBuilder();
+ groupDesc.setGroupDesc(new GroupDescBuilder(group).build());
+ //Update augmented data
+ groupBuilder.addAugmentation(NodeGroupDescStats.class, groupDesc.build());
+ existGroupKeys.remove(groupKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, groupRef, groupBuilder.build());
}
}
}
- /* Delete all not presented Group Nodes */
- deleteAllNotPresentNode(fNodeIdent, trans, deviceGroupKeys);
}
private void deleteAllNotPresentNode(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
final ReadWriteTransaction trans, final List<GroupKey> deviceGroupKeys) {
- final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(trans);
+
+ if (deviceGroupKeys == null) {
return;
}
- final List<Group> existGroups = fNode.get().getGroup() != null
- ? fNode.get().getGroup() : Collections.<Group> emptyList();
- /* Add all existed groups paths - no updated paths has to be removed */
- for (final Group group : existGroups) {
- if (deviceGroupKeys.remove(group.getKey())) {
- break; // group still exist on device
- }
- LOG.trace("Group {} has to removed.", group);
- final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, group.getKey());
+
+ for (final GroupKey key : deviceGroupKeys) {
+ final InstanceIdentifier<Group> delGroupIdent = fNodeIdent.child(Group.class, key);
+ LOG.trace("Group {} has to removed.", key);
Optional<Group> delGroup = Optional.absent();
try {
delGroup = trans.read(LogicalDatastoreType.OPERATIONAL, delGroupIdent).checkedGet();
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatRpcMsgManager.TransactionCacheContainer;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager.StatDataStoreOperation;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterConfigStatsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatistics;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.OpendaylightMeterStatisticsListener;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeatures;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.MeterFeaturesBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.nodes.node.meter.MeterStatisticsBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.config.stats.reply.MeterConfigStats;
import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.types.rev130918.meter.statistics.reply.MeterStats;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
LOG.debug("STAT-MANAGER - MeterConfigStatsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<MeterConfigStats> meterConfStat = notification.getMeterConfigStats() != null
- ? new ArrayList<>(notification.getMeterConfigStats()) : new ArrayList<MeterConfigStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof MeterConfigStatsUpdated) {
- meterConfStat.addAll(((MeterConfigStatsUpdated) notif).getMeterConfigStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+                /* Validate that the FlowCapableNode exists */
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+ Optional<FlowCapableNode> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL,fNodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ return;
+ }
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+                /* Collect keys of existing Meters; any key not updated below will be removed */
+ final List<Meter> existMeters = fNode.get().getMeter() != null
+ ? fNode.get().getMeter() : Collections.<Meter> emptyList();
+ final List<MeterKey> existMeterKeys = new ArrayList<>();
+ for (final Meter meter : existMeters) {
+ existMeterKeys.add(meter.getKey());
+ }
+ /* MeterConfig processing */
+ comitConfMeterStats(txContainer, tx, fNodeIdent, existMeterKeys);
+                /* Delete all Meter nodes no longer present on the device */
+ deleteAllNotPresentedNodes(fNodeIdent, tx, Collections.unmodifiableList(existMeterKeys));
/* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
- comitConfMeterStats(meterConfStat, nodeIdent, tx);
}
});
}
@Override
public void onMeterFeaturesUpdated(final MeterFeaturesUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - MeterFeaturesUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
- return;
- }
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if ( ! txContainer.isPresent()) {
return;
}
- final MeterFeatures stats = new MeterFeaturesBuilder(notification).build();
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
- .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
- final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeIdent
- .augmentation(NodeMeterFeatures.class).child(MeterFeatures.class);
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- /* Notification for continue collecting statistics */
- notifyToCollectNextStatistics(nodeIdent);
- Optional<Node> node = Optional.absent();
- try {
- node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
}
- if (node.isPresent()) {
- tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterFeaturesUpdated)) {
+ break;
+ }
+ final MeterFeatures stats = new MeterFeaturesBuilder((MeterFeaturesUpdated)notif).build();
+ final InstanceIdentifier<NodeMeterFeatures> nodeMeterFeatureIdent =
+ nodeIdent.augmentation(NodeMeterFeatures.class);
+ final InstanceIdentifier<MeterFeatures> meterFeatureIdent = nodeMeterFeatureIdent
+ .child(MeterFeatures.class);
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if (node.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterFeatureIdent, new NodeMeterFeaturesBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterFeatureIdent, stats);
+ manager.registerAdditionalNodeFeature(nodeIdent, StatCapabTypes.METER_STATS);
+ }
}
}
});
@Override
public void onMeterStatisticsUpdated(final MeterStatisticsUpdated notification) {
+ Preconditions.checkNotNull(notification);
final TransactionId transId = notification.getTransactionId();
final NodeId nodeId = notification.getId();
if ( ! isExpectedStatistics(transId, nodeId)) {
LOG.debug("STAT-MANAGER - MeterStatisticsUpdated: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<MeterStats> meterStat = notification.getMeterStats() != null
- ? new ArrayList<>(notification.getMeterStats()) : new ArrayList<MeterStats>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
- for (final TransactionAware notif : cacheNotifs) {
- if (notif instanceof MeterConfigStatsUpdated) {
- meterStat.addAll(((MeterStatisticsUpdated) notif).getMeterStats());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
public void applyOperation(final ReadWriteTransaction tx) {
- statMeterCommit(meterStat, nodeIdent, tx);
- /* Notification for continue collecting statistics */
- notifyToCollectNextStatistics(nodeIdent);
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier
+ .create(Nodes.class).child(Node.class, new NodeKey(nodeId));
+                /* Verify that the Node exists */
+ Optional<Node> node = Optional.absent();
+ try {
+ node = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! node.isPresent()) {
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+
+ Optional<Meter> notifMeter = Optional.absent();
+ final Optional<? extends DataObject> inputObj = txContainer.get().getConfInput();
+ if (inputObj.isPresent() && inputObj.get() instanceof Meter) {
+ notifMeter = Optional.<Meter> of((Meter)inputObj.get());
+ }
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterStatisticsUpdated)) {
+ break;
+ }
+ statMeterCommit(((MeterStatisticsUpdated) notif).getMeterStats(), nodeIdent, tx);
+ }
+ if (notifMeter.isPresent()) {
+ notifyToCollectNextStatistics(nodeIdent);
+ }
}
});
}
private void statMeterCommit(final List<MeterStats> meterStats,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction tx) {
+
+ Preconditions.checkNotNull(meterStats);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
+
for (final MeterStats mStat : meterStats) {
final MeterStatistics stats = new MeterStatisticsBuilder(mStat).build();
- final MeterKey mKey = new MeterKey(mStat.getMeterId());
- final InstanceIdentifier<MeterStatistics> msIdent = fNodeIdent
- .child(Meter.class, mKey).augmentation(NodeMeterStatistics.class)
- .child(MeterStatistics.class);
+ final InstanceIdentifier<Meter> meterIdent = fNodeIdent.child(Meter.class, new MeterKey(mStat.getMeterId()));
+ final InstanceIdentifier<NodeMeterStatistics> nodeMeterStatIdent = meterIdent
+ .augmentation(NodeMeterStatistics.class);
+ final InstanceIdentifier<MeterStatistics> msIdent = nodeMeterStatIdent.child(MeterStatistics.class);
/* Meter Statistics commit */
- Optional<FlowCapableNode> fNode = Optional.absent();
+ Optional<Meter> meter = Optional.absent();
try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
+ meter = tx.read(LogicalDatastoreType.OPERATIONAL, meterIdent).checkedGet();
}
catch (final ReadFailedException e) {
LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
}
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
+ if (meter.isPresent()) {
+ tx.merge(LogicalDatastoreType.OPERATIONAL, nodeMeterStatIdent, new NodeMeterStatisticsBuilder().build(), true);
+ tx.put(LogicalDatastoreType.OPERATIONAL, msIdent, stats);
}
}
}
- private void comitConfMeterStats(final List<MeterConfigStats> meterConfStat,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ private void comitConfMeterStats(final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<FlowCapableNode> fNodeIdent, final List<MeterKey> existMeterKeys) {
- final InstanceIdentifier<FlowCapableNode> fNodeIdent = nodeIdent.augmentation(FlowCapableNode.class);
- final List<MeterKey> deviceMeterKeys = new ArrayList<>();
-
- for (final MeterConfigStats meterConf : meterConfStat) {
- final MeterBuilder meterBuilder = new MeterBuilder(meterConf);
- if (meterConf.getMeterId() != null) {
- final MeterKey meterKey = new MeterKey(meterConf.getMeterId());
- meterBuilder.setKey(meterKey);
- final InstanceIdentifier<Meter> meterRef = nodeIdent
- .augmentation(FlowCapableNode.class).child(Meter.class,meterKey);
- final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
- meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterConf).build());
- //Update augmented data
- meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
- deviceMeterKeys.add(meterKey);
- Optional<FlowCapableNode> fNode = Optional.absent();
- try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, fNodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for FlowCapableNode fail! {}", fNodeIdent, e);
- }
- if (fNode.isPresent()) {
- trans.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
+ Preconditions.checkNotNull(existMeterKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof MeterConfigStatsUpdated)) {
+ break;
+ }
+ final List<MeterConfigStats> meterStats = ((MeterConfigStatsUpdated) notif).getMeterConfigStats();
+ if (meterStats == null) {
+ break;
+ }
+ for (final MeterConfigStats meterStat : meterStats) {
+ if (meterStat.getMeterId() != null) {
+ final MeterBuilder meterBuilder = new MeterBuilder(meterStat);
+ final MeterKey meterKey = new MeterKey(meterStat.getMeterId());
+ final InstanceIdentifier<Meter> meterRef = fNodeIdent.child(Meter.class, meterKey);
+
+ final NodeMeterConfigStatsBuilder meterConfig = new NodeMeterConfigStatsBuilder();
+ meterConfig.setMeterConfigStats(new MeterConfigStatsBuilder(meterStat).build());
+ //Update augmented data
+ meterBuilder.addAugmentation(NodeMeterConfigStats.class, meterConfig.build());
+ existMeterKeys.remove(meterKey);
+ tx.put(LogicalDatastoreType.OPERATIONAL, meterRef, meterBuilder.build());
}
}
}
- /* Delete all not presented Meter Nodes */
- deleteAllNotPresentedNodes(fNodeIdent, trans, deviceMeterKeys);
}
private void deleteAllNotPresentedNodes(final InstanceIdentifier<FlowCapableNode> fNodeIdent,
- final ReadWriteTransaction trans, final List<MeterKey> deviceMeterKeys) {
- /* Delete all not presented meters */
- final Optional<FlowCapableNode> fNode = readLatestConfiguration(fNodeIdent);
+ final ReadWriteTransaction tx, final List<MeterKey> deviceMeterKeys) {
+
+ Preconditions.checkNotNull(fNodeIdent);
+ Preconditions.checkNotNull(tx);
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for FlowCapableNode fail! Node {} doesn't exist.", fNodeIdent);
+ if (deviceMeterKeys == null) {
return;
}
- final List<Meter> existMeters = fNode.get().getMeter() != null
- ? fNode.get().getMeter() : Collections.<Meter> emptyList();
- /* Add all existed groups paths - no updated paths has to be removed */
- for (final Meter meter : existMeters) {
- if (deviceMeterKeys.remove(meter.getKey())) {
- break; // Meter still exist on device
- }
- final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, meter.getKey());
+
+ for (final MeterKey key : deviceMeterKeys) {
+ final InstanceIdentifier<Meter> delMeterIdent = fNodeIdent.child(Meter.class, key);
+            LOG.trace("Meter {} has to be removed.", key);
Optional<Meter> delMeter = Optional.absent();
try {
- delMeter = trans.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
+ delMeter = tx.read(LogicalDatastoreType.OPERATIONAL, delMeterIdent).checkedGet();
}
catch (final ReadFailedException e) {
// NOOP - probably another transaction delete that node
}
if (delMeter.isPresent()) {
- trans.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
+ tx.delete(LogicalDatastoreType.OPERATIONAL, delMeterIdent);
}
}
}
package org.opendaylight.controller.md.statistics.manager.impl;
-import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionAware;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.transaction.rev131103.TransactionId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.Queue;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueBuilder;
import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.queues.QueueKey;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
/**
* statistics-manager
LOG.debug("STAT-MANAGER - QueueStatisticsUpdate: unregistred notification detect TransactionId {}", transId);
return;
}
+ manager.getRpcMsgManager().addNotification(notification, nodeId);
if (notification.isMoreReplies()) {
- manager.getRpcMsgManager().addNotification(notification, nodeId);
return;
}
- final List<QueueIdAndStatisticsMap> queueStats = notification.getQueueIdAndStatisticsMap() != null
- ? new ArrayList<>(notification.getQueueIdAndStatisticsMap()) : new ArrayList<QueueIdAndStatisticsMap>(10);
- final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
- if (txContainer.isPresent()) {
- final List<? extends TransactionAware> cachedNotifs =
- txContainer.get().getNotifications();
- for (final TransactionAware notif : cachedNotifs) {
- if (notif instanceof QueueStatisticsUpdate) {
- queueStats.addAll(((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap());
- }
- }
- }
- final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
- .child(Node.class, new NodeKey(nodeId));
- /* Queue statistics are small size and we are not able to change for OF cross controller
- * - don't need to make are atomic */
+
+ /* Don't block RPC Notification thread */
manager.enqueue(new StatDataStoreOperation() {
@Override
- public void applyOperation(final ReadWriteTransaction trans) {
- /* Notification for continue */
+ public void applyOperation(final ReadWriteTransaction tx) {
+
+ final InstanceIdentifier<Node> nodeIdent = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class, new NodeKey(nodeId));
+
+                /* Validate that the Node exists */
+ Optional<Node> fNode = Optional.absent();
+ try {
+ fNode = tx.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
+ }
+ if ( ! fNode.isPresent()) {
+ LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ return;
+ }
+
+ /* Get and Validate TransactionCacheContainer */
+ final Optional<TransactionCacheContainer<?>> txContainer = getTransactionCacheContainer(transId, nodeId);
+ if ( ! isTransactionCacheContainerValid(txContainer)) {
+ return;
+ }
+                /* Collect keys of existing Queues; any key not updated below will be removed */
+ final List<NodeConnector> existConnectors = fNode.get().getNodeConnector() != null
+ ? fNode.get().getNodeConnector() : Collections.<NodeConnector> emptyList();
+ final Map<QueueKey, NodeConnectorKey> existQueueKeys = new HashMap<>();
+ for (final NodeConnector connect : existConnectors) {
+ final List<Queue> listQueues = connect.getAugmentation(FlowCapableNodeConnector.class).getQueue();
+ if (listQueues != null) {
+ for (final Queue queue : listQueues) {
+ existQueueKeys.put(queue.getKey(), connect.getKey());
+ }
+ }
+ }
+ /* Queue processing */
+ statQueueCommit(txContainer, tx, nodeIdent, existQueueKeys);
+                /* Delete all Queue nodes no longer present on the device */
+ deleteAllNotPresentedNodes(nodeIdent, tx, Collections.unmodifiableMap(existQueueKeys));
+ /* Notification for continue collecting statistics */
notifyToCollectNextStatistics(nodeIdent);
- statQueueCommit(queueStats, nodeIdent, trans);
}
});
}
- private void statQueueCommit(final List<QueueIdAndStatisticsMap> queueStats,
- final InstanceIdentifier<Node> nodeIdent, final ReadWriteTransaction trans) {
+ private void statQueueCommit(
+ final Optional<TransactionCacheContainer<?>> txContainer, final ReadWriteTransaction tx,
+ final InstanceIdentifier<Node> nodeIdent, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
- /* check exist FlowCapableNode and write statistics */
- Optional<Node> fNode = Optional.absent();
- try {
- fNode = trans.read(LogicalDatastoreType.OPERATIONAL, nodeIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.debug("Read Operational/DS for Node fail! {}", nodeIdent, e);
- return;
+ Preconditions.checkNotNull(existQueueKeys);
+ Preconditions.checkNotNull(txContainer);
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ final List<? extends TransactionAware> cacheNotifs = txContainer.get().getNotifications();
+ for (final TransactionAware notif : cacheNotifs) {
+ if ( ! (notif instanceof QueueStatisticsUpdate)) {
+ break;
+ }
+ final List<QueueIdAndStatisticsMap> queueStats = ((QueueStatisticsUpdate) notif).getQueueIdAndStatisticsMap();
+ if (queueStats == null) {
+ break;
+ }
+ for (final QueueIdAndStatisticsMap queueStat : queueStats) {
+ if (queueStat.getQueueId() != null) {
+ final FlowCapableNodeConnectorQueueStatistics statChild =
+ new FlowCapableNodeConnectorQueueStatisticsBuilder(queueStat).build();
+ final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
+ new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
+ statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
+ final QueueKey qKey = new QueueKey(queueStat.getQueueId());
+ final InstanceIdentifier<Queue> queueIdent = nodeIdent
+ .child(NodeConnector.class, new NodeConnectorKey(queueStat.getNodeConnectorId()))
+ .augmentation(FlowCapableNodeConnector.class)
+ .child(Queue.class, qKey);
+ final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = queueIdent.augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
+ existQueueKeys.remove(qKey);
+ tx.merge(LogicalDatastoreType.OPERATIONAL, queueIdent, new QueueBuilder().setKey(qKey).build());
+ tx.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ }
+ }
}
- if ( ! fNode.isPresent()) {
- LOG.trace("Read Operational/DS for Node fail! Node {} doesn't exist.", nodeIdent);
+ }
+
+ private void deleteAllNotPresentedNodes(final InstanceIdentifier<Node> nodeIdent,
+ final ReadWriteTransaction tx, final Map<QueueKey, NodeConnectorKey> existQueueKeys) {
+
+ Preconditions.checkNotNull(nodeIdent);
+ Preconditions.checkNotNull(tx);
+
+ if (existQueueKeys == null) {
return;
}
- for (final QueueIdAndStatisticsMap queueEntry : queueStats) {
- final FlowCapableNodeConnectorQueueStatistics statChild =
- new FlowCapableNodeConnectorQueueStatisticsBuilder(queueEntry).build();
- final FlowCapableNodeConnectorQueueStatisticsDataBuilder statBuild =
- new FlowCapableNodeConnectorQueueStatisticsDataBuilder();
- statBuild.setFlowCapableNodeConnectorQueueStatistics(statChild);
- final QueueKey qKey = new QueueKey(queueEntry.getQueueId());
- final InstanceIdentifier<FlowCapableNodeConnectorQueueStatisticsData> queueStatIdent = nodeIdent
- .child(NodeConnector.class, new NodeConnectorKey(queueEntry.getNodeConnectorId()))
- .augmentation(FlowCapableNodeConnector.class)
- .child(Queue.class, qKey).augmentation(FlowCapableNodeConnectorQueueStatisticsData.class);
- trans.put(LogicalDatastoreType.OPERATIONAL, queueStatIdent, statBuild.build());
+ for (final Entry<QueueKey, NodeConnectorKey> entry : existQueueKeys.entrySet()) {
+ final InstanceIdentifier<Queue> queueIdent = nodeIdent.child(NodeConnector.class, entry.getValue())
+ .augmentation(FlowCapableNodeConnector.class).child(Queue.class, entry.getKey());
+                LOG.trace("Queue {} has to be removed.", queueIdent);
+ Optional<Queue> delQueue = Optional.absent();
+ try {
+ delQueue = tx.read(LogicalDatastoreType.OPERATIONAL, queueIdent).checkedGet();
+ }
+ catch (final ReadFailedException e) {
+ // NOOP - probably another transaction delete that node
+ }
+ if (delQueue.isPresent()) {
+ tx.delete(LogicalDatastoreType.OPERATIONAL, queueIdent);
+ }
}
}
}
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Set;
import org.opendaylight.controller.md.sal.binding.api.DataBroker;
import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
-import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.statistics.manager.StatNodeRegistration;
import org.opendaylight.controller.md.statistics.manager.StatPermCollector.StatCapabTypes;
import org.opendaylight.controller.md.statistics.manager.StatisticsManager;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemoved;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.meter.statistics.rev131111.NodeMeterFeatures;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
*
* Created: Aug 28, 2014
*/
-public class StatNodeRegistrationImpl implements StatNodeRegistration {
+public class StatNodeRegistrationImpl implements StatNodeRegistration, DataChangeListener {
private static final Logger LOG = LoggerFactory.getLogger(StatNodeRegistrationImpl.class);
Preconditions.checkArgument(db != null, "DataBroker can not be null!");
Preconditions.checkArgument(notificationService != null, "NotificationProviderService can not be null!");
notifListenerRegistration = notificationService.registerNotificationListener(this);
+ /* Build Path */
+ final InstanceIdentifier<FlowCapableNode> flowNodeWildCardIdentifier = InstanceIdentifier.create(Nodes.class)
+ .child(Node.class).augmentation(FlowCapableNode.class);
+ listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.OPERATIONAL,
+ flowNodeWildCardIdentifier, StatNodeRegistrationImpl.this, DataChangeScope.BASE);
}
@Override
maxCapTables = data.getMaxTables();
final Optional<Short> maxTables = Optional.<Short> of(maxCapTables);
-
- /* Meters management */
- final InstanceIdentifier<NodeMeterFeatures> meterFeaturesIdent = nodeIdent.augmentation(NodeMeterFeatures.class);
-
-
- Optional<NodeMeterFeatures> meterFeatures = Optional.absent();
- try {
- meterFeatures = tx.read(LogicalDatastoreType.OPERATIONAL, meterFeaturesIdent).checkedGet();
- }
- catch (final ReadFailedException e) {
- LOG.warn("Read NodeMeterFeatures {} fail!", meterFeaturesIdent, e);
- }
- if (meterFeatures.isPresent()) {
- statCapabTypes.add(StatCapabTypes.METER_STATS);
- }
manager.connectedNodeRegistration(nodeIdent,
Collections.unmodifiableList(statCapabTypes), maxTables.get());
}
@Override
public void onNodeRemoved(final NodeRemoved notification) {
+ Preconditions.checkNotNull(notification);
final NodeRef nodeRef = notification.getNodeRef();
final InstanceIdentifier<?> nodeRefIdent = nodeRef.getValue();
final InstanceIdentifier<Node> nodeIdent =
@Override
public void onNodeUpdated(final NodeUpdated notification) {
+ Preconditions.checkNotNull(notification);
final FlowCapableNodeUpdated newFlowNode =
notification.getAugmentation(FlowCapableNodeUpdated.class);
if (newFlowNode != null && newFlowNode.getSwitchFeatures() != null) {
connectFlowCapableNode(swichFeaturesIdent, switchFeatures, nodeIdent);
}
}
+
+ @Override
+ public void onDataChanged(final AsyncDataChangeEvent<InstanceIdentifier<?>, DataObject> changeEvent) {
+ Preconditions.checkNotNull(changeEvent,"Async ChangeEvent can not be null!");
+ /* All DataObjects for create */
+ final Set<InstanceIdentifier<?>> createdData = changeEvent.getCreatedData() != null
+ ? changeEvent.getCreatedData().keySet() : Collections.<InstanceIdentifier<?>> emptySet();
+
+ for (final InstanceIdentifier<?> entryKey : createdData) {
+ final InstanceIdentifier<Node> nodeIdent = entryKey
+ .firstIdentifierOf(Node.class);
+ if ( ! nodeIdent.isWildcarded()) {
+ final NodeRef nodeRef = new NodeRef(nodeIdent);
+ // FIXME: these calls is a job for handshake or for inventory manager
+ /* check Group and Meter future */
+ manager.getRpcMsgManager().getGroupFeaturesStat(nodeRef);
+ manager.getRpcMsgManager().getMeterFeaturesStat(nodeRef);
+ }
+ }
+ }
}
package org.opendaylight.controller.md.statistics.manager.impl;
+import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
statNetCollectorServ.shutdown();
}
+ @Override
+ public boolean hasActiveNodes() {
+ return ( ! statNodeHolder.isEmpty());
+ }
+
@Override
public boolean isProvidedFlowNodeActive(
final InstanceIdentifier<Node> flowNode) {
@Override
public boolean connectedNodeRegistration(final InstanceIdentifier<Node> ident,
final List<StatCapabTypes> statTypes, final Short nrOfSwitchTables) {
- if (ident.isWildcarded()) {
- LOG.warn("FlowCapableNode IstanceIdentifier {} registration can not be wildcarded!", ident);
- } else {
+ if (isNodeIdentValidForUse(ident)) {
if ( ! statNodeHolder.containsKey(ident)) {
synchronized (statNodeHolderLock) {
final boolean startStatCollecting = statNodeHolder.size() == 0;
@Override
public boolean disconnectedNodeUnregistration(final InstanceIdentifier<Node> ident) {
- if (ident.isWildcarded()) {
- LOG.warn("FlowCapableNode IstanceIdentifier {} unregistration can not be wildcarded!", ident);
- } else {
+ if (isNodeIdentValidForUse(ident)) {
if (statNodeHolder.containsKey(ident)) {
synchronized (statNodeHolderLock) {
if (statNodeHolder.containsKey(ident)) {
return false;
}
+ @Override
+ public boolean registerAdditionalNodeFeature(final InstanceIdentifier<Node> ident,
+ final StatCapabTypes statCapab) {
+ if (isNodeIdentValidForUse(ident)) {
+ if ( ! statNodeHolder.containsKey(ident)) {
+ return false;
+ }
+ final StatNodeInfoHolder statNode = statNodeHolder.get(ident);
+ if ( ! statNode.getStatMarkers().contains(statCapab)) {
+ synchronized (statNodeHolderLock) {
+ if ( ! statNode.getStatMarkers().contains(statCapab)) {
+ final List<StatCapabTypes> statCapabForEdit = new ArrayList<>(statNode.getStatMarkers());
+ statCapabForEdit.add(statCapab);
+ final StatNodeInfoHolder nodeInfoHolder = new StatNodeInfoHolder(statNode.getNodeRef(),
+ Collections.unmodifiableList(statCapabForEdit), statNode.getMaxTables());
+
+ final Map<InstanceIdentifier<Node>, StatNodeInfoHolder> statNodes =
+ new HashMap<>(statNodeHolder);
+ statNodes.put(ident, nodeInfoHolder);
+ statNodeHolder = Collections.unmodifiableMap(statNodes);
+ }
+ }
+ }
+ }
+ return true;
+ }
+
@Override
public void collectNextStatistics() {
if (wakeMe) {
break;
case GROUP_STATS:
LOG.trace("STAT-MANAGER-collecting GROUP-STATS for NodeRef {}", actualNodeRef);
- manager.getRpcMsgManager().getGroupFeaturesStat(actualNodeRef);
- waitingForNotification();
manager.getRpcMsgManager().getAllGroupsConfStats(actualNodeRef);
waitingForNotification();
manager.getRpcMsgManager().getAllGroupsStat(actualNodeRef);
break;
case METER_STATS:
LOG.trace("STAT-MANAGER-collecting METER-STATS for NodeRef {}", actualNodeRef);
- manager.getRpcMsgManager().getMeterFeaturesStat(actualNodeRef);
- waitingForNotification();
manager.getRpcMsgManager().getAllMeterConfigStat(actualNodeRef);
waitingForNotification();
manager.getRpcMsgManager().getAllMetersStat(actualNodeRef);
}
}
- @Override
- public boolean hasActiveNodes() {
- return ( ! statNodeHolder.isEmpty());
+ /**
+ * Validates a node InstanceIdentifier before it is used as a statNodeHolder key.
+ * Rejects (and logs) null and wildcarded identifiers.
+ */
+ private boolean isNodeIdentValidForUse(final InstanceIdentifier<Node> ident) {
+ if (ident == null) {
+ // ident is null here, so there is nothing to interpolate - the message
+ // must not carry an unused {} placeholder (it would be logged literally).
+ LOG.warn("FlowCapableNode InstanceIdentifier can not be null!");
+ return false;
+ }
+ if (ident.isWildcarded()) {
+ LOG.warn("FlowCapableNode InstanceIdentifier {} can not be wildcarded!", ident);
+ return false;
+ }
+ return true;
 }
}
@Override
public Void call() throws Exception {
- Preconditions.checkArgument(nodeRef != null, "NodeRef can not be null!");
final GetGroupDescriptionInputBuilder builder =
new GetGroupDescriptionInputBuilder();
builder.setNode(nodeRef);
return;
}
}
- LOG.debug("Node {} has not removed.", nodeIdent);
+ LOG.debug("Node {} has not been removed.", nodeIdent);
+ }
+
+ /**
+ * Delegates the additional-feature registration to the collector that owns
+ * the node: the first collector that accepts (returns true) wins and the
+ * search stops. If no collector claims the node, only a debug trace is left.
+ */
+ @Override
+ public void registerAdditionalNodeFeature(final InstanceIdentifier<Node> nodeIdent,
+ final StatCapabTypes statCapab) {
+ for (final StatPermCollector collector : statCollectors) {
+ if (collector.registerAdditionalNodeFeature(nodeIdent, statCapab)) {
+ return;
+ }
+ }
+ LOG.debug("Node {} has not been extended for feature {}!", nodeIdent, statCapab);
 }
/* Getter internal Statistic Manager Job Classes */
*/
package org.opendaylight.controller.netconf.cli;
+import static com.google.common.base.Throwables.getStackTraceAsString;
+
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.net.InetAddress;
}
// Prints the error and full stack trace to stderr.
// NOTE(review): System.console() returns null whenever stdin/stdout are not an
// interactive terminal (pipes, tests, IDE), which made the previous version
// NPE-prone; System.err is always available. Also, fillInStackTrace() must not
// be called here - it overwrites the exception's original trace with the
// current call site, destroying the information we are trying to print.
private static void handleException(final Exception e, final String message) {
- // FIXME syserr the exception and stacktrace
+ System.err.println(String.format("Error %s cause %s", message, getStackTraceAsString(e)));
}
private static void writeStatus(final ConsoleIO io, final String blueprint, final Object... args) {
import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.controller.sal.connect.netconf.NetconfDevice.SchemaResourcesDTO;
+import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas.NetconfStateSchemasResolverImpl;
import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
import org.opendaylight.yangtools.yang.model.util.repo.FilesystemSchemaCachingProvider;
import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProviders;
+import org.opendaylight.yangtools.yang.parser.repo.SharedSchemaRepository;
+import org.opendaylight.yangtools.yang.parser.util.TextToASTTransformer;
/**
* Manages connect/disconnect to 1 remote device
private final NioEventLoopGroup nettyThreadGroup;
private final NetconfClientDispatcherImpl netconfClientDispatcher;
+ private static final String CACHE = "cache/schema";
+
// Connection
private NetconfDeviceConnectionHandler handler;
private NetconfDevice device;
handler = new NetconfDeviceConnectionHandler(commandDispatcher, schemaContextRegistry,
console, name);
- device = NetconfDevice.createNetconfDevice(deviceId, getGlobalNetconfSchemaProvider(), executor, handler);
+
+ final SharedSchemaRepository repository = new SharedSchemaRepository("repo");
+ final SchemaContextFactory schemaContextFactory = repository.createSchemaContextFactory(SchemaSourceFilter.ALWAYS_ACCEPT);
+ final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(repository, YangTextSchemaSource.class, new File(CACHE));
+ repository.registerSchemaSourceListener(cache);
+ repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
+
+ device = new NetconfDevice(new SchemaResourcesDTO(repository, schemaContextFactory, new NetconfStateSchemasResolverImpl()),
+ deviceId, handler, executor, new NetconfMessageTransformer());
listener = new NetconfDeviceCommunicator(deviceId, device);
configBuilder.withSessionListener(listener);
listener.initializeRemoteConnection(netconfClientDispatcher, configBuilder.build());
import io.netty.channel.local.LocalAddress;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.GlobalEventExecutor;
+import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.file.Files;
public static final String USERNAME = "user";
public static final String PASSWORD = "pwd";
+ private File sshKeyPair;
private SshProxyServer sshProxyServer;
private ExecutorService nioExec;
@Before
public void setUp() throws Exception {
+ sshKeyPair = Files.createTempFile("sshKeyPair", ".pem").toFile();
+ sshKeyPair.deleteOnExit();
nioExec = Executors.newFixedThreadPool(1);
clientGroup = new NioEventLoopGroup();
minaTimerEx = Executors.newScheduledThreadPool(1);
return true;
}
})
- .setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString()))
+ .setKeyPairProvider(new PEMGeneratorHostKeyProvider(sshKeyPair.toPath().toAbsolutePath().toString()))
.setIdleTimeout(Integer.MAX_VALUE)
.createSshProxyServerConfiguration());
}
import org.slf4j.LoggerFactory;
public abstract class AbstractNetconfSession<S extends NetconfSession, L extends NetconfSessionListener<S>> extends AbstractProtocolSession<NetconfMessage> implements NetconfSession, NetconfExiSession {
- private static final Logger logger = LoggerFactory.getLogger(AbstractNetconfSession.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractNetconfSession.class);
private final L sessionListener;
private final long sessionId;
private boolean up = false;
this.sessionListener = sessionListener;
this.channel = channel;
this.sessionId = sessionId;
- logger.debug("Session {} created", sessionId);
+ LOG.debug("Session {} created", sessionId);
}
protected abstract S thisInstance();
@Override
protected void handleMessage(final NetconfMessage netconfMessage) {
- logger.debug("handling incoming message");
+ LOG.debug("handling incoming message");
sessionListener.onMessage(thisInstance(), netconfMessage);
}
public ChannelFuture sendMessage(final NetconfMessage netconfMessage) {
final ChannelFuture future = channel.writeAndFlush(netconfMessage);
if (delayedEncoder != null) {
- replaceMessageEncoder(delayedEncoder);
- delayedEncoder = null;
+ replaceMessageEncoder(delayedEncoder);
+ delayedEncoder = null;
}
return future;
@Override
protected void endOfInput() {
- logger.debug("Session {} end of input detected while session was in state {}", toString(), isUp() ? "up"
+ LOG.debug("Session {} end of input detected while session was in state {}", toString(), isUp() ? "up"
: "initialized");
if (isUp()) {
this.sessionListener.onSessionDown(thisInstance(), new IOException("End of input detected. Close the session."));
@Override
protected void sessionUp() {
- logger.debug("Session {} up", toString());
+ LOG.debug("Session {} up", toString());
sessionListener.onSessionUp(thisInstance());
this.up = true;
}
try {
exiParams = EXIParameters.fromXmlElement(XmlElement.fromDomDocument(startExiMessage.getDocument()));
} catch (final EXIOptionsException e) {
- logger.warn("Unable to parse EXI parameters from {} om session {}", XmlUtil.toString(startExiMessage.getDocument()), this, e);
+ LOG.warn("Unable to parse EXI parameters from {} om session {}", XmlUtil.toString(startExiMessage.getDocument()), this, e);
throw new IllegalArgumentException(e);
}
final NetconfEXICodec exiCodec = new NetconfEXICodec(exiParams.getOptions());
addExiHandlers(exiCodec);
- logger.debug("Session {} EXI handlers added to pipeline", this);
+ LOG.debug("Session {} EXI handlers added to pipeline", this);
}
protected abstract void addExiHandlers(NetconfEXICodec exiCodec);
import org.w3c.dom.NodeList;
public abstract class AbstractNetconfSessionNegotiator<P extends NetconfSessionPreferences, S extends AbstractNetconfSession<S, L>, L extends NetconfSessionListener<S>>
-extends AbstractSessionNegotiator<NetconfHelloMessage, S> {
+ extends AbstractSessionNegotiator<NetconfHelloMessage, S> {
- private static final Logger logger = LoggerFactory.getLogger(AbstractNetconfSessionNegotiator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractNetconfSessionNegotiator.class);
public static final String NAME_OF_EXCEPTION_HANDLER = "lastExceptionHandler";
@Override
public void operationComplete(Future<? super Channel> future) {
Preconditions.checkState(future.isSuccess(), "Ssl handshake was not successful");
- logger.debug("Ssl handshake complete");
+ LOG.debug("Ssl handshake complete");
start();
}
});
private void start() {
final NetconfMessage helloMessage = this.sessionPreferences.getHelloMessage();
- logger.debug("Session negotiation started with hello message {} on channel {}", XmlUtil.toString(helloMessage.getDocument()), channel);
+ LOG.debug("Session negotiation started with hello message {} on channel {}", XmlUtil.toString(helloMessage.getDocument()), channel);
channel.pipeline().addLast(NAME_OF_EXCEPTION_HANDLER, new ExceptionHandlingInboundChannelHandler());
synchronized (this) {
if (state != State.ESTABLISHED) {
- logger.debug("Connection timeout after {}, session is in state {}", timeout, state);
+ LOG.debug("Connection timeout after {}, session is in state {}", timeout, state);
// Do not fail negotiation if promise is done or canceled
// It would result in setting result of the promise second time and that throws exception
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if(future.isSuccess()) {
- logger.debug("Channel {} closed: success", future.channel());
+ LOG.debug("Channel {} closed: success", future.channel());
} else {
- logger.warn("Channel {} closed: fail", future.channel());
+ LOG.warn("Channel {} closed: fail", future.channel());
}
}
});
protected abstract S getSession(L sessionListener, Channel channel, NetconfHelloMessage message) throws NetconfDocumentedException;
private synchronized void changeState(final State newState) {
- logger.debug("Changing state from : {} to : {} for channel: {}", state, newState, channel);
+ LOG.debug("Changing state from : {} to : {} for channel: {}", state, newState, channel);
Preconditions.checkState(isStateChangePermitted(state, newState), "Cannot change state from %s to %s for chanel %s", state,
newState, channel);
this.state = newState;
if (state == State.OPEN_WAIT && newState == State.FAILED) {
return true;
}
- logger.debug("Transition from {} to {} is not allowed", state, newState);
+ LOG.debug("Transition from {} to {} is not allowed", state, newState);
return false;
}
private final class ExceptionHandlingInboundChannelHandler extends ChannelInboundHandlerAdapter {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- logger.warn("An exception occurred during negotiation with {}", channel.remoteAddress(), cause);
+ LOG.warn("An exception occurred during negotiation with {}", channel.remoteAddress(), cause);
cancelTimeout();
negotiationFailed(cause);
changeState(State.FAILED);
package org.opendaylight.controller.netconf.nettyutil.handler;
+import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;
-
import org.opendaylight.controller.netconf.util.messages.NetconfMessageConstants;
import org.opendaylight.controller.netconf.util.messages.NetconfMessageHeader;
-import com.google.common.base.Preconditions;
-
public class ChunkedFramingMechanismEncoder extends MessageToByteEncoder<ByteBuf> {
public static final int DEFAULT_CHUNK_SIZE = 8192;
public static final int MIN_CHUNK_SIZE = 128;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;
-
import org.opendaylight.controller.netconf.util.messages.NetconfMessageConstants;
public class EOMFramingMechanismEncoder extends MessageToByteEncoder<ByteBuf> {
package org.opendaylight.controller.netconf.nettyutil.handler;
+import io.netty.buffer.ByteBuf;
+import io.netty.handler.codec.MessageToByteEncoder;
import org.opendaylight.controller.netconf.util.messages.FramingMechanism;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.netty.buffer.ByteBuf;
-import io.netty.handler.codec.MessageToByteEncoder;
-
public final class FramingMechanismHandlerFactory {
- private static final Logger logger = LoggerFactory.getLogger(FramingMechanismHandlerFactory.class);
+ private static final Logger LOG = LoggerFactory.getLogger(FramingMechanismHandlerFactory.class);
private FramingMechanismHandlerFactory() {
// not called - private constructor for utility class
}
public static MessageToByteEncoder<ByteBuf> createHandler(FramingMechanism framingMechanism) {
- logger.debug("{} framing mechanism was selected.", framingMechanism);
+ LOG.debug("{} framing mechanism was selected.", framingMechanism);
if (framingMechanism == FramingMechanism.EOM) {
return new EOMFramingMechanismEncoder();
} else {
package org.opendaylight.controller.netconf.nettyutil.handler;
-import java.util.List;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
+import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class NetconfChunkAggregator extends ByteToMessageDecoder {
- private final static Logger logger = LoggerFactory.getLogger(NetconfChunkAggregator.class);
+ private final static Logger LOG = LoggerFactory.getLogger(NetconfChunkAggregator.class);
private static final String GOT_PARAM_WHILE_WAITING_FOR_PARAM = "Got byte {} while waiting for {}";
private static final String GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM = "Got byte {} while waiting for {}-{}";
public static final int DEFAULT_MAXIMUM_CHUNK_SIZE = 16 * 1024 * 1024;
private void checkNewLine(byte b,String errorMessage){
if (b != '\n') {
- logger.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM, b, (byte)'\n');
+ LOG.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM, b, (byte)'\n');
throw new IllegalStateException(errorMessage);
}
}
private void checkHash(byte b,String errorMessage){
if (b != '#') {
- logger.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM, b, (byte)'#');
+ LOG.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM, b, (byte)'#');
throw new IllegalStateException(errorMessage);
}
}
private void checkChunkSize(){
if (chunkSize > maxChunkSize) {
- logger.debug("Parsed chunk size {}, maximum allowed is {}", chunkSize, maxChunkSize);
+ LOG.debug("Parsed chunk size {}, maximum allowed is {}", chunkSize, maxChunkSize);
throw new IllegalStateException("Maximum chunk size exceeded");
}
}
if (b < '0' || b > '9') {
- logger.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte)'0', (byte)'9');
+ LOG.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte)'0', (byte)'9');
throw new IllegalStateException("Invalid chunk size encountered");
}
* comes through.
*/
if (in.readableBytes() < chunkSize) {
- logger.debug("Buffer has {} bytes, need {} to complete chunk", in.readableBytes(), chunkSize);
+ LOG.debug("Buffer has {} bytes, need {} to complete chunk", in.readableBytes(), chunkSize);
in.discardReadBytes();
return;
}
} else if (b == '#') {
state = State.FOOTER_FOUR;
} else {
- logger.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte) '#', (byte) '1', (byte) '9');
+ LOG.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte) '#', (byte) '1', (byte) '9');
throw new IllegalStateException("Malformed chunk footer encountered (byte 2)");
}
}
private static int processHeaderLengthFirst(byte b) {
if (!isHeaderLengthFirst(b)) {
- logger.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte)'1', (byte)'9');
+ LOG.debug(GOT_PARAM_WHILE_WAITING_FOR_PARAM_PARAM, b, (byte)'1', (byte)'9');
throw new IllegalStateException("Invalid chunk size encountered (byte 0)");
}
*/
package org.opendaylight.controller.netconf.nettyutil.handler;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufInputStream;
+import io.netty.buffer.ByteBufUtil;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.handler.codec.ByteToMessageDecoder;
import java.io.InputStream;
import java.util.List;
-
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMResult;
import javax.xml.transform.sax.SAXTransformerFactory;
import javax.xml.transform.sax.TransformerHandler;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.openexi.sax.EXIReader;
import org.slf4j.Logger;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;
-import com.google.common.base.Preconditions;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufInputStream;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.handler.codec.ByteToMessageDecoder;
-
public final class NetconfEXIToMessageDecoder extends ByteToMessageDecoder {
private static final Logger LOG = LoggerFactory.getLogger(NetconfEXIToMessageDecoder.class);
final EXIReader r = codec.getReader();
final SAXTransformerFactory transformerFactory
- = (SAXTransformerFactory) TransformerFactory.newInstance();
+ = (SAXTransformerFactory) TransformerFactory.newInstance();
final TransformerHandler handler = transformerFactory.newTransformerHandler();
r.setContentHandler(handler);
*/
package org.opendaylight.controller.netconf.nettyutil.handler;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
-
import java.io.IOException;
-
import javax.xml.transform.TransformerException;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Charsets;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
/**
* Customized NetconfMessageToXMLEncoder that serializes additional header with
* session metadata along with
*/
package org.opendaylight.controller.netconf.nettyutil.handler;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Optional;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToByteEncoder;
-
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
-
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
-
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.xml.XmlUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Comment;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
-
public class NetconfMessageToXMLEncoder extends MessageToByteEncoder<NetconfMessage> {
private static final Logger LOG = LoggerFactory.getLogger(NetconfMessageToXMLEncoder.class);
private static final TransformerFactory FACTORY = TransformerFactory.newInstance();
*/
package org.opendaylight.controller.netconf.nettyutil.handler;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
-
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
-
import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
import org.opendaylight.controller.netconf.api.NetconfMessage;
import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Charsets;
-import com.google.common.collect.ImmutableList;
import org.xml.sax.SAXException;
/**
*/
package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+import com.google.common.base.Preconditions;
import org.opendaylight.controller.netconf.util.xml.XmlElement;
import org.openexi.proc.common.AlignmentType;
import org.openexi.proc.common.EXIOptions;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
-import com.google.common.base.Preconditions;
-
public final class EXIParameters {
private static final String EXI_PARAMETER_ALIGNMENT = "alignment";
static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned";
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandlerAdapter;
+import io.netty.channel.ChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
-
import org.apache.sshd.ClientChannel;
import org.apache.sshd.ClientSession;
import org.apache.sshd.SshClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.base.Preconditions;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandlerAdapter;
-import io.netty.channel.ChannelPromise;
-
/**
* Netty SSH handler class. Acts as interface between Netty and SSH library.
*/
public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AsyncSshHandler.class);
public static final String SUBSYSTEM = "netconf";
public static final SshClient DEFAULT_CLIENT = SshClient.setUpDefaultClient();
}
private void startSsh(final ChannelHandlerContext ctx, final SocketAddress address) {
- logger.debug("Starting SSH to {} on channel: {}", address, ctx.channel());
+ LOG.debug("Starting SSH to {} on channel: {}", address, ctx.channel());
final ConnectFuture sshConnectionFuture = sshClient.connect(authenticationHandler.getUsername(), address);
sshConnectionFuture.addListener(new SshFutureListener<ConnectFuture>() {
private synchronized void handleSshSessionCreated(final ConnectFuture future, final ChannelHandlerContext ctx) {
try {
- logger.trace("SSH session created on channel: {}", ctx.channel());
+ LOG.trace("SSH session created on channel: {}", ctx.channel());
session = future.getSession();
final AuthFuture authenticateFuture = authenticationHandler.authenticate(session);
private synchronized void handleSshAuthenticated(final ClientSession session, final ChannelHandlerContext ctx) {
try {
- logger.debug("SSH session authenticated on channel: {}, server version: {}", ctx.channel(), session.getServerVersion());
+ LOG.debug("SSH session authenticated on channel: {}, server version: {}", ctx.channel(), session.getServerVersion());
channel = session.createSubsystemChannel(SUBSYSTEM);
channel.setStreaming(ClientChannel.Streaming.Async);
}
private synchronized void handleSshChanelOpened(final ChannelHandlerContext ctx) {
- logger.trace("SSH subsystem channel opened successfully on channel: {}", ctx.channel());
+ LOG.trace("SSH subsystem channel opened successfully on channel: {}", ctx.channel());
connectPromise.setSuccess();
- connectPromise = null;
// TODO we should also read from error stream and at least log from that
}
private synchronized void handleSshSetupFailure(final ChannelHandlerContext ctx, final Throwable e) {
- logger.warn("Unable to setup SSH connection on channel: {}", ctx.channel(), e);
- connectPromise.setFailure(e);
- connectPromise = null;
- throw new IllegalStateException("Unable to setup SSH connection on channel: " + ctx.channel(), e);
+ LOG.warn("Unable to setup SSH connection on channel: {}", ctx.channel(), e);
+ disconnect(ctx, ctx.newPromise());
+
+ // If the promise is not yet done, we have failed with initial connect and set connectPromise to failure
+ if(!connectPromise.isDone()) {
+ connectPromise.setFailure(e);
+ }
}
@Override
@Override
public synchronized void disconnect(final ChannelHandlerContext ctx, final ChannelPromise promise) {
+ // Super disconnect is necessary in this case since we are using NioSocketChannel and it needs to cleanup its resources
+ // e.g. Socket that it tries to open in its constructor (https://bugs.opendaylight.org/show_bug.cgi?id=2430)
+ // TODO better solution would be to implement custom ChannelFactory + Channel that will use mina SSH lib internally: port this to custom channel implementation
+ try {
+ super.disconnect(ctx, ctx.newPromise());
+ } catch (final Exception e) {
+ LOG.warn("Unable to cleanup all resources for channel: {}. Ignoring.", ctx.channel(), e);
+ }
+
if(sshReadAsyncListener != null) {
sshReadAsyncListener.close();
}
});
}
+ // If we have already succeeded and the session was dropped after, we need to fire inactive to notify reconnect logic
+ if(connectPromise.isSuccess()) {
+ ctx.fireChannelInactive();
+ }
+
channel = null;
- promise.setSuccess();
- logger.debug("SSH session closed on channel: {}", ctx.channel());
- ctx.fireChannelInactive();
+ promise.setSuccess();
+ LOG.debug("SSH session closed on channel: {}", ctx.channel());
}
}
*/
public final class AsyncSshHandlerReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
- private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AsyncSshHandlerReader.class);
private static final int BUFFER_SIZE = 8192;
if(future.getException() != null) {
if(asyncOut.isClosed() || asyncOut.isClosing()) {
// Ssh dropped
- logger.debug("Ssh session dropped on channel: {}", channelId, future.getException());
+ LOG.debug("Ssh session dropped on channel: {}", channelId, future.getException());
} else {
- logger.warn("Exception while reading from SSH remote on channel {}", channelId, future.getException());
+ LOG.warn("Exception while reading from SSH remote on channel {}", channelId, future.getException());
}
invokeDisconnect();
return;
if (future.getRead() > 0) {
final ByteBuf msg = Unpooled.wrappedBuffer(buf.array(), 0, future.getRead());
- if(logger.isTraceEnabled()) {
- logger.trace("Reading message on channel: {}, message: {}", channelId, AsyncSshHandlerWriter.byteBufToString(msg));
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Reading message on channel: {}, message: {}", channelId, AsyncSshHandlerWriter.byteBufToString(msg));
}
readHandler.onMessageRead(msg);
*/
public final class AsyncSshHandlerWriter implements AutoCloseable {
- private static final Logger logger = LoggerFactory
+ private static final Logger LOG = LoggerFactory
.getLogger(AsyncSshHandlerWriter.class);
// public static final int MAX_PENDING_WRITES = 1000;
private void writeWithPendingDetection(final ChannelHandlerContext ctx, final ChannelPromise promise, final ByteBuf byteBufMsg) {
try {
- if (logger.isTraceEnabled()) {
- logger.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
}
asyncIn.write(toBuffer(byteBufMsg)).addListener(new SshFutureListener<IoWriteFuture>() {
- @Override
- public void operationComplete(final IoWriteFuture future) {
- if (logger.isTraceEnabled()) {
- logger.trace("Ssh write request finished on channel: {} with result: {}: and ex:{}, message: {}",
+ @Override
+ public void operationComplete(final IoWriteFuture future) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Ssh write request finished on channel: {} with result: {}: and ex:{}, message: {}",
ctx.channel(), future.isWritten(), future.getException(), byteBufToString(byteBufMsg));
- }
-
- // Notify success or failure
- if (future.isWritten()) {
- promise.setSuccess();
- } else {
- logger.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
- promise.setFailure(future.getException());
- }
-
- // Not needed anymore, release
- byteBufMsg.release();
-
- // Check pending queue and schedule next
- // At this time we are guaranteed that we are not in pending state anymore so the next request should succeed
- writePendingIfAny();
- }
- });
+ }
+
+ // Notify success or failure
+ if (future.isWritten()) {
+ promise.setSuccess();
+ } else {
+ LOG.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
+ promise.setFailure(future.getException());
+ }
+
+ // Not needed anymore, release
+ byteBufMsg.release();
+
+ // Check pending queue and schedule next
+ // At this time we are guaranteed that we are not in pending state anymore so the next request should succeed
+ writePendingIfAny();
+ }
+ });
} catch (final WritePendingException e) {
queueRequest(ctx, byteBufMsg, promise);
}
// In case of pending, reschedule next message from queue
final PendingWriteRequest pendingWrite = pending.poll();
final ByteBuf msg = pendingWrite.msg;
- if (logger.isTraceEnabled()) {
- logger.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
}
writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
private void queueRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
// try {
- logger.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
- if (logger.isTraceEnabled()) {
- logger.trace("Queueing request due to pending: {}", byteBufToString(msg));
+ LOG.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Queueing request due to pending: {}", byteBufToString(msg));
}
new PendingWriteRequest(ctx, msg, promise).pend(pending);
// } catch (final Exception ex) {
-// logger.warn("Unable to queue write request on channel: {}. Setting fail for the request: {}", ctx.channel(), ex, byteBufToString(msg));
+// LOG.warn("Unable to queue write request on channel: {}. Setting fail for the request: {}", ctx.channel(), ex, byteBufToString(msg));
// msg.release();
// promise.setFailure(ex);
// }
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
+
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
fullOptions.setPreservePIs(true);
return Arrays.asList(new Object[][]{
- {noChangeXml, new EXIOptions()},
- {fullOptionsXml, fullOptions},
+ {noChangeXml, new EXIOptions()},
+ {fullOptionsXml, fullOptions},
});
}
fullOptions.setPreservePIs(true);
return Arrays.asList(new Object[][]{
- {noChangeXml, new EXIOptions()},
- {fullOptionsXml, fullOptions},
+ {noChangeXml, new EXIOptions()},
+ {fullOptionsXml, fullOptions},
});
}
package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
-import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.verifyZeroInteractions;
+
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
+import io.netty.channel.DefaultChannelPromise;
import java.io.IOException;
import java.net.SocketAddress;
import org.apache.sshd.ClientChannel;
doReturn(ctx).when(ctx).fireChannelActive();
doReturn(ctx).when(ctx).fireChannelInactive();
doReturn(ctx).when(ctx).fireChannelRead(anyObject());
+ doReturn(mock(ChannelFuture.class)).when(ctx).disconnect(any(ChannelPromise.class));
doReturn(getMockedPromise()).when(ctx).newPromise();
}
verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
verify(promise).setSuccess();
- verifyNoMoreInteractions(promise);
verify(ctx).fireChannelActive();
}
verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
- try {
- sshChannelOpenListener.operationComplete(getFailedOpenFuture());
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called if we throw exception ?
- }
+ sshChannelOpenListener.operationComplete(getFailedOpenFuture());
+ verify(promise).setFailure(any(Throwable.class));
}
@Test
final AuthFuture authFuture = getFailedAuthFuture();
- try {
- sshAuthListener.operationComplete(authFuture);
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called ?
- }
+ sshAuthListener.operationComplete(authFuture);
+ verify(promise).setFailure(any(Throwable.class));
}
private AuthFuture getFailedAuthFuture() {
asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
final ConnectFuture connectFuture = getFailedConnectFuture();
- try {
- sshConnectListener.operationComplete(connectFuture);
- fail("Exception expected");
- } catch (final Exception e) {
- verify(promise).setFailure(any(Throwable.class));
- verifyNoMoreInteractions(promise);
- // TODO should ctx.channelInactive be called ?
- }
+ sshConnectListener.operationComplete(connectFuture);
+ verify(promise).setFailure(any(Throwable.class));
}
private ConnectFuture getFailedConnectFuture() {
}
private ChannelPromise getMockedPromise() {
- final ChannelPromise promise = mock(ChannelPromise.class);
- doReturn(promise).when(promise).setSuccess();
- doReturn(promise).when(promise).setFailure(any(Throwable.class));
- return promise;
+ return spy(new DefaultChannelPromise(channel));
}
private static abstract class SuccessFutureListener<T extends SshFuture<T>> implements FutureCallback<SshFutureListener<T>> {
*/
public class RemoteNetconfCommand implements AsyncCommand, SessionAware {
- private static final Logger logger = LoggerFactory.getLogger(RemoteNetconfCommand.class);
+ private static final Logger LOG = LoggerFactory.getLogger(RemoteNetconfCommand.class);
private final EventLoopGroup clientEventGroup;
private final LocalAddress localAddress;
@Override
public void start(final Environment env) throws IOException {
- logger.trace("Establishing internal connection to netconf server for client: {}", getClientAddress());
+ LOG.trace("Establishing internal connection to netconf server for client: {}", getClientAddress());
final Bootstrap clientBootstrap = new Bootstrap();
clientBootstrap.group(clientEventGroup).channel(LocalChannel.class);
if(future.isSuccess()) {
clientChannel = clientChannelFuture.channel();
} else {
- logger.warn("Unable to establish internal connection to netconf server for client: {}", getClientAddress());
+ LOG.warn("Unable to establish internal connection to netconf server for client: {}", getClientAddress());
Preconditions.checkNotNull(callback, "Exit callback must be set");
callback.onExit(1, "Unable to establish internal connection to netconf server for client: "+ getClientAddress());
}
@Override
public void destroy() {
- logger.trace("Releasing internal connection to netconf server for client: {} on channel: {}",
+ LOG.trace("Releasing internal connection to netconf server for client: {} on channel: {}",
getClientAddress(), clientChannel);
clientChannelFuture.cancel(true);
@Override
public void operationComplete(final ChannelFuture future) throws Exception {
if (future.isSuccess() == false) {
- logger.warn("Unable to release internal connection to netconf server on channel: {}", clientChannel);
+ LOG.warn("Unable to release internal connection to netconf server on channel: {}", clientChannel);
}
}
});
*/
final class SshProxyClientHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(SshProxyClientHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SshProxyClientHandler.class);
private final IoInputStream in;
private final IoOutputStream out;
}, new AsyncSshHandlerReader.ReadMsgHandler() {
@Override
public void onMessageRead(final ByteBuf msg) {
- if(logger.isTraceEnabled()) {
- logger.trace("Forwarding message for client: {} on channel: {}, message: {}",
+ if(LOG.isTraceEnabled()) {
+ LOG.trace("Forwarding message for client: {} on channel: {}, message: {}",
netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel(), AsyncSshHandlerWriter.byteBufToString(msg));
}
// Just forward to delegate
@Override
public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
- asyncSshHandlerWriter.write(ctx, msg, ctx.newPromise());
+ asyncSshHandlerWriter.write(ctx, msg, ctx.newPromise());
}
@Override
public void channelInactive(final ChannelHandlerContext ctx) throws Exception {
- logger.debug("Internal connection to netconf server was dropped for client: {} on channel: ",
+ LOG.debug("Internal connection to netconf server was dropped for client: {} on channel: {}",
netconfHelloMessageAdditionalHeader.getAddress(), ctx.channel());
callback.onExit(1, "Internal connection to netconf server was dropped for client: " +
netconfHelloMessageAdditionalHeader.getAddress() + " on channel: " + ctx.channel());
private static Map<String, String> getProperties(final SshProxyServerConfiguration sshProxyServerConfiguration) {
return new HashMap<String, String>()
- {{
- put(ServerFactoryManager.IDLE_TIMEOUT, String.valueOf(sshProxyServerConfiguration.getIdleTimeout()));
- // TODO make auth timeout configurable on its own
- put(ServerFactoryManager.AUTH_TIMEOUT, String.valueOf(sshProxyServerConfiguration.getIdleTimeout()));
- }};
+ {
+ {
+ put(ServerFactoryManager.IDLE_TIMEOUT, String.valueOf(sshProxyServerConfiguration.getIdleTimeout()));
+ // TODO make auth timeout configurable on its own
+ put(ServerFactoryManager.AUTH_TIMEOUT, String.valueOf(sshProxyServerConfiguration.getIdleTimeout()));
+ }
+ };
}
@Override
import org.slf4j.LoggerFactory;
final class AuthProviderTracker implements ServiceTrackerCustomizer<AuthProvider, AuthProvider>, PasswordAuthenticator {
- private static final Logger logger = LoggerFactory.getLogger(AuthProviderTracker.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AuthProviderTracker.class);
private final BundleContext bundleContext;
@Override
public AuthProvider addingService(final ServiceReference<AuthProvider> reference) {
- logger.trace("Service {} added", reference);
+ LOG.trace("Service {} added", reference);
final AuthProvider authService = bundleContext.getService(reference);
final Integer newServicePreference = getPreference(reference);
if(isBetter(newServicePreference)) {
final AuthProvider authService = bundleContext.getService(reference);
final Integer newServicePreference = getPreference(reference);
if(isBetter(newServicePreference)) {
- logger.trace("Replacing modified service {} in netconf SSH.", reference);
+ LOG.trace("Replacing modified service {} in netconf SSH.", reference);
this.authProvider = authService;
}
}
@Override
public void removedService(final ServiceReference<AuthProvider> reference, final AuthProvider service) {
- logger.trace("Removing service {} from netconf SSH. " +
- "SSH won't authenticate users until AuthProvider service will be started.", reference);
+ LOG.trace("Removing service {} from netconf SSH. "
+ + "SSH won't authenticate users until AuthProvider service will be started.", reference);
maxPreference = null;
this.authProvider = null;
}
import org.slf4j.LoggerFactory;
public class NetconfSSHActivator implements BundleActivator {
- private static final Logger logger = LoggerFactory.getLogger(NetconfSSHActivator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfSSHActivator.class);
private static final java.lang.String ALGORITHM = "RSA";
private static final int KEY_SIZE = 4096;
final Optional<InetSocketAddress> maybeSshSocketAddress = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, InfixProp.ssh);
if (maybeSshSocketAddress.isPresent() == false) {
- logger.trace("SSH bridge not configured");
+ LOG.trace("SSH bridge not configured");
return null;
}
final InetSocketAddress sshSocketAddress = maybeSshSocketAddress.get();
- logger.trace("Starting netconf SSH bridge at {}", sshSocketAddress);
+ LOG.trace("Starting netconf SSH bridge at {}", sshSocketAddress);
final LocalAddress localAddress = NetconfConfigUtil.getNetconfLocalAddress();
* the server.
*/
public class EchoClient extends Thread {
- private static final Logger logger = LoggerFactory.getLogger(EchoClient.class);
+ private static final Logger LOG = LoggerFactory.getLogger(EchoClient.class);
private final ChannelInitializer<LocalChannel> channelInitializer;
// Wait until the connection is closed.
f.channel().closeFuture().sync();
} catch (Exception e) {
- logger.error("Error in client", e);
+ LOG.error("Error in client", e);
throw new RuntimeException("Error in client", e);
} finally {
// Shut down the event loop to terminate all threads.
- logger.info("Client is shutting down");
+ LOG.info("Client is shutting down");
group.shutdownGracefully();
}
}
* the server.
*/
public class EchoClientHandler extends ChannelInboundHandlerAdapter implements ChannelFutureListener {
- private static final Logger logger = LoggerFactory.getLogger(EchoClientHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(EchoClientHandler.class);
private ChannelHandlerContext ctx;
private final StringBuilder fromServer = new StringBuilder();
@Override
public synchronized void channelActive(ChannelHandlerContext ctx) {
checkState(this.ctx == null);
- logger.info("channelActive");
+ LOG.info("channelActive");
this.ctx = ctx;
state = State.CONNECTED;
}
ByteBuf bb = (ByteBuf) msg;
String string = bb.toString(Charsets.UTF_8);
fromServer.append(string);
- logger.info(">{}", string);
+ LOG.info(">{}", string);
bb.release();
}
@Override
public synchronized void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
- logger.warn("Unexpected exception from downstream.", cause);
+ LOG.warn("Unexpected exception from downstream.", cause);
checkState(this.ctx.equals(ctx));
ctx.close();
this.ctx = null;
public synchronized void operationComplete(ChannelFuture future) throws Exception {
checkState(state == State.CONNECTING);
if (future.isSuccess()) {
- logger.trace("Successfully connected, state will be switched in channelActive");
+ LOG.trace("Successfully connected, state will be switched in channelActive");
} else {
state = State.FAILED_TO_CONNECT;
}
* Echoes back any received data from a client.
*/
public class EchoServer implements Runnable {
- private static final Logger logger = LoggerFactory.getLogger(EchoServer.class);
+ private static final Logger LOG = LoggerFactory.getLogger(EchoServer.class);
public void run() {
// Configure the server.
if (message == null || "exit".equalsIgnoreCase(message)) {
break;
}
- logger.debug("Got '{}'", message);
+ LOG.debug("Got '{}'", message);
clientHandler.write(message);
} while (true);
System.exit(0);
@Sharable
public class EchoServerHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(EchoServerHandler.class.getName());
+ private static final Logger LOG = LoggerFactory.getLogger(EchoServerHandler.class);
private String fromLastNewLine = "";
private final Splitter splitter = Splitter.onPattern("\r?\n");
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
- logger.debug("sleep start");
+ LOG.debug("sleep start");
Thread.sleep(1000);
- logger.debug("sleep done");
+ LOG.debug("sleep done");
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
ByteBuf byteBuf = (ByteBuf) msg;
String message = byteBuf.toString(Charsets.UTF_8);
- logger.info("writing back '{}'", message);
+ LOG.info("writing back '{}'", message);
ctx.write(msg);
fromLastNewLine += message;
for (String line : splitter.split(fromLastNewLine)) {
if ("quit".equals(line)) {
- logger.info("closing server ctx");
+ LOG.info("closing server ctx");
ctx.flush();
ctx.close();
break;
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
- logger.debug("flushing");
+ LOG.debug("flushing");
ctx.flush();
}
}
import org.slf4j.LoggerFactory;
public class ProxyServerHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(ProxyServerHandler.class.getName());
+ private static final Logger LOG = LoggerFactory.getLogger(ProxyServerHandler.class);
private final Bootstrap clientBootstrap;
private final LocalAddress localAddress;
@Override
public void channelInactive(ChannelHandlerContext ctx) {
- logger.info("channelInactive - closing client connection");
+ LOG.info("channelInactive - closing client connection");
clientChannel.close();
}
@Override
public void channelRead(ChannelHandlerContext ctx, final Object msg) {
- logger.debug("Writing to client {}", msg);
+ LOG.debug("Writing to client {}", msg);
clientChannel.write(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
- logger.debug("flushing");
+ LOG.debug("flushing");
clientChannel.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
- logger.warn("Unexpected exception from downstream.", cause);
+ LOG.warn("Unexpected exception from downstream.", cause);
ctx.close();
}
}
class ProxyClientHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(ProxyClientHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ProxyClientHandler.class);
private final ChannelHandlerContext remoteCtx;
@Override
public void channelActive(ChannelHandlerContext ctx) {
- logger.info("client active");
+ LOG.info("client active");
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
ByteBuf bb = (ByteBuf) msg;
- logger.info(">{}", bb.toString(Charsets.UTF_8));
+ LOG.info(">{}", bb.toString(Charsets.UTF_8));
remoteCtx.write(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
- logger.debug("Flushing server ctx");
+ LOG.debug("Flushing server ctx");
remoteCtx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
- logger.warn("Unexpected exception from downstream", cause);
+ LOG.warn("Unexpected exception from downstream", cause);
ctx.close();
}
// called both when local or remote connection dies
@Override
public void channelInactive(ChannelHandlerContext ctx) {
- logger.debug("channelInactive() called, closing remote client ctx");
+ LOG.debug("channelInactive() called, closing remote client ctx");
remoteCtx.close();
}
}
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.HashedWheelTimer;
+import java.io.File;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.util.concurrent.ExecutorService;
import org.slf4j.LoggerFactory;
public class SSHTest {
- public static final Logger logger = LoggerFactory.getLogger(SSHTest.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SSHTest.class);
public static final String AHOJ = "ahoj\n";
private static EventLoopGroup nettyGroup;
@Test
public void test() throws Exception {
+ File sshKeyPair = Files.createTempFile("sshKeyPair", ".pem").toFile();
+ sshKeyPair.deleteOnExit();
new Thread(new EchoServer(), "EchoServer").start();
final InetSocketAddress addr = new InetSocketAddress("127.0.0.1", 10831);
public boolean authenticate(final String username, final String password, final ServerSession session) {
return true;
}
- }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
+ }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(sshKeyPair.toPath().toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
final EchoClientHandler echoClientHandler = connectClient(addr);
Thread.sleep(500);
}
assertTrue(echoClientHandler.isConnected());
- logger.info("connected, writing to client");
+ LOG.info("connected, writing to client");
echoClientHandler.write(AHOJ);
// check that server sent back the same string
final String read = echoClientHandler.read();
assertTrue(read + " should end with " + AHOJ, read.endsWith(AHOJ));
} finally {
- logger.info("Closing socket");
+ LOG.info("Closing socket");
sshProxyServer.close();
}
}
Thread.sleep(100);
}
assertFalse(echoClientHandler.isConnected());
- assertEquals(State.CONNECTION_CLOSED, echoClientHandler.getState());
+ assertEquals(State.FAILED_TO_CONNECT, echoClientHandler.getState());
}
}
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
+import java.io.File;
import java.net.InetSocketAddress;
import java.nio.file.Files;
import java.util.concurrent.ExecutorService;
private static final String PASSWORD = "netconf";
private static final String HOST = "127.0.0.1";
private static final int PORT = 1830;
- private static final Logger logger = LoggerFactory.getLogger(SSHServerTest.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SSHServerTest.class);
+ private File sshKeyPair;
private SshProxyServer server;
@Mock
@Before
public void setUp() throws Exception {
+ sshKeyPair = Files.createTempFile("sshKeyPair", ".pem").toFile();
+ sshKeyPair.deleteOnExit();
+
MockitoAnnotations.initMocks(this);
doReturn(null).when(mockedContext).createFilter(anyString());
doNothing().when(mockedContext).addServiceListener(any(ServiceListener.class), anyString());
doReturn(new ServiceReference[0]).when(mockedContext).getServiceReferences(anyString(), anyString());
- logger.info("Creating SSH server");
+ LOG.info("Creating SSH server");
final InetSocketAddress addr = InetSocketAddress.createUnresolved(HOST, PORT);
server = new SshProxyServer(minaTimerEx, clientGroup, nioExec);
public boolean authenticate(final String username, final String password, final ServerSession session) {
return true;
}
- }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(Files.createTempFile("prefix", "suffix").toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
- logger.info("SSH server started on " + PORT);
+ }).setKeyPairProvider(new PEMGeneratorHostKeyProvider(sshKeyPair.toPath().toAbsolutePath().toString())).setIdleTimeout(Integer.MAX_VALUE).createSshProxyServerConfiguration());
+ LOG.info("SSH server started on {}", PORT);
}
@Test
import org.slf4j.LoggerFactory;
public class ProxyServerHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(ProxyServerHandler.class.getName());
+ private static final Logger LOG = LoggerFactory.getLogger(ProxyServerHandler.class);
private final Bootstrap clientBootstrap;
private final LocalAddress localAddress;
@Override
public void channelInactive(ChannelHandlerContext ctx) {
- logger.trace("channelInactive - closing client channel");
+ LOG.trace("channelInactive - closing client channel");
clientChannel.close();
}
@Override
public void channelRead(ChannelHandlerContext ctx, final Object msg) {
- logger.trace("Writing to client channel");
+ LOG.trace("Writing to client channel");
clientChannel.write(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
- logger.trace("Flushing client channel");
+ LOG.trace("Flushing client channel");
clientChannel.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
- logger.warn("Unexpected exception from downstream.", cause);
+ LOG.warn("Unexpected exception from downstream.", cause);
ctx.close();
}
}
class ProxyClientHandler extends ChannelInboundHandlerAdapter {
- private static final Logger logger = LoggerFactory.getLogger(ProxyClientHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ProxyClientHandler.class);
private final ChannelHandlerContext remoteCtx;
private ChannelHandlerContext localCtx;
@Override
public void channelActive(ChannelHandlerContext ctx) {
checkState(this.localCtx == null);
- logger.trace("Client channel active");
+ LOG.trace("Client channel active");
this.localCtx = ctx;
}
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
- logger.trace("Forwarding message");
+ LOG.trace("Forwarding message");
remoteCtx.write(msg);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
- logger.trace("Flushing remote ctx");
+ LOG.trace("Flushing remote ctx");
remoteCtx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
// Close the connection when an exception is raised.
- logger.warn("Unexpected exception from downstream", cause);
+ LOG.warn("Unexpected exception from downstream", cause);
checkState(this.localCtx.equals(ctx));
ctx.close();
}
// called both when local or remote connection dies
@Override
public void channelInactive(ChannelHandlerContext ctx) {
- logger.trace("channelInactive() called, closing remote client ctx");
+ LOG.trace("channelInactive() called, closing remote client ctx");
remoteCtx.close();
}
* Opens TCP port specified in config.ini, creates bridge between this port and local netconf server.
*/
public class NetconfTCPActivator implements BundleActivator {
- private static final Logger logger = LoggerFactory.getLogger(NetconfTCPActivator.class);
+ private static final Logger LOG = LoggerFactory.getLogger(NetconfTCPActivator.class);
private ProxyServer proxyServer;
@Override
public void start(BundleContext context) {
final Optional<InetSocketAddress> maybeAddress = NetconfConfigUtil.extractNetconfServerAddress(context, InfixProp.tcp);
if (maybeAddress.isPresent() == false) {
- logger.debug("Netconf tcp server is not configured to start");
+ LOG.debug("Netconf tcp server is not configured to start");
return;
}
InetSocketAddress address = maybeAddress.get();
if (address.getAddress().isAnyLocalAddress()) {
- logger.warn("Unprotected netconf TCP address is configured to ANY local address. This is a security risk. " +
- "Consider changing {} to 127.0.0.1", NetconfConfigUtil.getNetconfServerAddressKey(InfixProp.tcp));
+ LOG.warn("Unprotected netconf TCP address is configured to ANY local address. This is a security risk. Consider changing {} to 127.0.0.1",
+ NetconfConfigUtil.getNetconfServerAddressKey(InfixProp.tcp));
}
- logger.info("Starting TCP netconf server at {}", address);
+ LOG.info("Starting TCP netconf server at {}", address);
proxyServer = new ProxyServer(address, NetconfConfigUtil.getNetconfLocalAddress());
}
*/
package org.opendaylight.controller.netconf.auth.usermanager;
+import com.google.common.annotations.VisibleForTesting;
import org.opendaylight.controller.netconf.auth.AuthProvider;
import org.opendaylight.controller.sal.authorization.AuthResultEnum;
import org.opendaylight.controller.usermanager.IUserManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.google.common.annotations.VisibleForTesting;
-
/**
* AuthProvider implementation delegating to AD-SAL UserManager instance.
*/
public class AuthProviderImpl implements AuthProvider {
- private static final Logger logger = LoggerFactory.getLogger(AuthProviderImpl.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AuthProviderImpl.class);
private IUserManager nullableUserManager;
final ServiceTrackerCustomizer<IUserManager, IUserManager> customizer = new ServiceTrackerCustomizer<IUserManager, IUserManager>() {
@Override
public IUserManager addingService(final ServiceReference<IUserManager> reference) {
- logger.trace("UerManager {} added", reference);
+ LOG.trace("UerManager {} added", reference);
nullableUserManager = bundleContext.getService(reference);
return nullableUserManager;
}
@Override
public void modifiedService(final ServiceReference<IUserManager> reference, final IUserManager service) {
- logger.trace("Replacing modified UerManager {}", reference);
+ LOG.trace("Replacing modified UerManager {}", reference);
nullableUserManager = service;
}
@Override
public void removedService(final ServiceReference<IUserManager> reference, final IUserManager service) {
- logger.trace("Removing UerManager {}. This AuthProvider will fail to authenticate every time", reference);
+ LOG.trace("Removing UerManager {}. This AuthProvider will fail to authenticate every time", reference);
synchronized (AuthProviderImpl.this) {
nullableUserManager = null;
}
@Override
public synchronized boolean authenticated(final String username, final String password) {
if (nullableUserManager == null) {
- logger.warn("Cannot authenticate user '{}', user manager service is missing", username);
+ LOG.warn("Cannot authenticate user '{}', user manager service is missing", username);
throw new IllegalStateException("User manager service is not available");
}
final AuthResultEnum authResult = nullableUserManager.authenticate(username, password);
- logger.debug("Authentication result for user '{}' : {}", username, authResult);
+ LOG.debug("Authentication result for user '{}' : {}", username, authResult);
return authResult.equals(AuthResultEnum.AUTH_ACCEPT) || authResult.equals(AuthResultEnum.AUTH_ACCEPT_LOC);
}
<modules>
<module>netconf-api</module>
- <!--FIXME make compilable-->
- <!--<module>netconf-cli</module>-->
+ <module>netconf-cli</module>
<module>netconf-config</module>
<module>netconf-impl</module>
<module>config-netconf-connector</module>
if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(singleton.getLoadBalancerPoolID())) {
throw new BadRequestException("LoadBalancerPool UUID already exists");
}
- loadBalancerPoolInterface.addNeutronLoadBalancerPool(singleton);
-
Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
if (instances != null) {
for (Object instance : instances) {