Fix FindBugs warnings in sal-distributed-datastore and enable enforcement 48/47548/6
author: Tom Pantelis <tpanteli@brocade.com>
Tue, 25 Oct 2016 16:27:57 +0000 (12:27 -0400)
committer: Tom Pantelis <tpanteli@brocade.com>
Wed, 16 Nov 2016 10:50:11 +0000 (10:50 +0000)
Several warnings were suppressed via annotation with justification provided.
Other warnings that were fixed:
 - remove redundant implements in several classes
 - "The referenced methods have names that differ only by capitalization"
   warnings. This is checked across all classes for consistency. The main
   offender was getTransactionID vs. getTransactionId. I changed all methods
   to getTransactionId and associated fields to transactionId.
 - unsynchronized access to a field where access is synchronized
   elsewhere (in DataTreeChangeListenerProxy and DatastoreContextIntrospector).
 - catching Exception instead of catching more specific exception types that
   are thrown from the try block.
 - unconfirmed casts - verify via Preconditions check to avoid warning
 - unnecessarily calling toString() on a String instance
 - synchronizing on an AtomicInteger instance (in ThreePhaseCommitCohortProxy) -
   not an issue in this case but changed to synchronize on a separate Object
   in lieu of suppressing the warning.
 - unsynchronized access to SimpleDateFormat, which isn't thread-safe (in ShardStats).
 - potential null-pointer access of 'shard' in ShardStats - changed to pass
   'shard' to the ctor in lieu of setter.
 - calling String#getBytes w/o specifying encoding (in DataTreeModificationOutput).
 - privileged access to create ClassLoader in ActorSystemProviderImpl although not
   likely a SecurityManager would ever be present.

Change-Id: I0a87208f3f200fbe4f78e950c21419fbab154d94
Signed-off-by: Tom Pantelis <tpanteli@brocade.com>
52 files changed:
opendaylight/md-sal/sal-distributed-datastore/pom.xml
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/AbstractDOMBroker.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/ConcurrentDOMDataBroker.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/ModuleShardBackendResolver.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CohortEntry.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerRegistrationActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContextIntrospector.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreSnapshotRestore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LocalTransactionFactoryImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardDataTree.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/SimpleShardDataTreeCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ConfigurationImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/config/ModuleConfig.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/DistributedEntityOwnershipService.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipListenerActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/entityownership/EntityOwnershipShardCommitCoordinator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardMBeanFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbstractThreePhaseCommitMessage.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModifications.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DatastoreSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ForwardedReadyTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializer.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/FrontendShardDataTreeSnapshotMetadata.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/persisted/MetadataShardDataTreeSnapshot.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/AbstractShardManagerCreator.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/DataTreeModificationOutput.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/config/yang/config/actor_system_provider/impl/ActorSystemProviderImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/BatchedModificationsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/messages/ReadyLocalTransactionSerializerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/shardmanager/ShardManagerTest.java

index 394d381..b416744 100644 (file)
           <propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <failOnError>true</failOnError>
+        </configuration>
+      </plugin>
       <plugin>
         <groupId>org.jacoco</groupId>
         <artifactId>jacoco-maven-plugin</artifactId>
index 45b82dd..d17de22 100644 (file)
@@ -33,7 +33,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
-        implements DOMDataBroker, AutoCloseable {
+        implements DOMDataBroker {
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
 
index 2eeb955..b978147 100644 (file)
@@ -15,6 +15,7 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -197,6 +198,10 @@ public class ConcurrentDOMDataBroker extends AbstractDOMBroker implements DOMDat
         Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor());
     }
 
+    @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE",
+            justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
+                + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
+                + "TransactionCommitFailedException and thus should be deemed as confirmed.")
     private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
             final DOMDataWriteTransaction transaction,
             final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
index de115f0..a101896 100644 (file)
@@ -15,6 +15,7 @@ import com.google.common.collect.BiMap;
 import com.google.common.collect.ImmutableBiMap;
 import com.google.common.collect.ImmutableBiMap.Builder;
 import com.google.common.primitives.UnsignedLong;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionStage;
 import java.util.concurrent.TimeUnit;
@@ -43,6 +44,8 @@ import scala.compat.java8.FutureConverters;
  *
  * @author Robert Varga
  */
+@SuppressFBWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
+                    justification = "Pertains to the NULL_FUTURE field below. Null is allowed and is intended")
 final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo> {
     private static final CompletableFuture<ShardBackendInfo> NULL_FUTURE = CompletableFuture.completedFuture(null);
     private static final Logger LOG = LoggerFactory.getLogger(ModuleShardBackendResolver.class);
index f585bb6..84535b5 100644 (file)
@@ -21,7 +21,7 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
 
 final class CohortEntry {
     private final ReadWriteShardDataTreeTransaction transaction;
-    private final TransactionIdentifier transactionID;
+    private final TransactionIdentifier transactionId;
     private final short clientVersion;
 
     private RuntimeException lastBatchedModificationsException;
@@ -33,13 +33,13 @@ final class CohortEntry {
 
     private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) {
         this.transaction = Preconditions.checkNotNull(transaction);
-        this.transactionID = transaction.getId();
+        this.transactionId = transaction.getId();
         this.clientVersion = clientVersion;
     }
 
     private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) {
         this.cohort = Preconditions.checkNotNull(cohort);
-        this.transactionID = cohort.getIdentifier();
+        this.transactionId = cohort.getIdentifier();
         this.transaction = null;
         this.clientVersion = clientVersion;
     }
@@ -52,8 +52,8 @@ final class CohortEntry {
         return new CohortEntry(cohort, clientVersion);
     }
 
-    TransactionIdentifier getTransactionID() {
-        return transactionID;
+    TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
     short getClientVersion() {
@@ -118,7 +118,7 @@ final class CohortEntry {
 
         if (cohortDecorator != null) {
             // Call the hook for unit tests.
-            cohort = cohortDecorator.decorate(transactionID, cohort);
+            cohort = cohortDecorator.decorate(transactionId, cohort);
         }
     }
 
@@ -149,7 +149,7 @@ final class CohortEntry {
     @Override
     public String toString() {
         final StringBuilder builder = new StringBuilder();
-        builder.append("CohortEntry [transactionID=").append(transactionID).append(", doImmediateCommit=")
+        builder.append("CohortEntry [transactionId=").append(transactionId).append(", doImmediateCommit=")
                 .append(doImmediateCommit).append("]");
         return builder.toString();
     }
index b79f304..e1f2777 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.Props;
 import akka.japi.Creator;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
@@ -85,6 +86,8 @@ public class DataChangeListener extends AbstractUntypedActor {
     private static class DataChangeListenerCreator implements Creator<DataChangeListener> {
         private static final long serialVersionUID = 1L;
 
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+                + "create remote instances of this actor and thus don't need it to be Serializable.")
         final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
 
         DataChangeListenerCreator(
index 06ad83c..89062dc 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.japi.Creator;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistrationReply;
@@ -56,6 +57,9 @@ public class DataChangeListenerRegistrationActor extends AbstractUntypedActor {
     private static class DataChangeListenerRegistrationCreator
                                             implements Creator<DataChangeListenerRegistrationActor> {
         private static final long serialVersionUID = 1L;
+
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+                + "create remote instances of this actor and thus don't need it to be Serializable.")
         final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
                                                            NormalizedNode<?, ?>>> registration;
 
index b3e0fb6..bccb484 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.Props;
 import akka.japi.Creator;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
@@ -75,6 +76,9 @@ final class DataTreeChangeListenerActor extends AbstractUntypedActor {
 
     private static final class DataTreeChangeListenerCreator implements Creator<DataTreeChangeListenerActor> {
         private static final long serialVersionUID = 1L;
+
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+                + "create remote instances of this actor and thus don't need it to be Serializable.")
         private final DOMDataTreeChangeListener listener;
 
         DataTreeChangeListenerCreator(final DOMDataTreeChangeListener listener) {
index 4776f2e..8a9b466 100644 (file)
@@ -117,7 +117,7 @@ final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> ext
     }
 
     @VisibleForTesting
-    ActorSelection getListenerRegistrationActor() {
+    synchronized ActorSelection getListenerRegistrationActor() {
         return listenerRegistrationActor;
     }
 
index 79da59f..17deeeb 100644 (file)
@@ -11,6 +11,7 @@ import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.japi.Creator;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
@@ -49,6 +50,9 @@ public final class DataTreeChangeListenerRegistrationActor extends AbstractUntyp
     private static final class DataTreeChangeListenerRegistrationCreator
             implements Creator<DataTreeChangeListenerRegistrationActor> {
         private static final long serialVersionUID = 1L;
+
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+                + "create remote instances of this actor and thus don't need it to be Serializable.")
         final ListenerRegistration<DOMDataTreeChangeListener> registration;
 
         DataTreeChangeListenerRegistrationCreator(ListenerRegistration<DOMDataTreeChangeListener> registration) {
index 1f09118..6685f64 100644 (file)
@@ -27,6 +27,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.text.WordUtils;
 import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
@@ -182,14 +183,16 @@ public class DatastoreContextIntrospector {
                 propertyName, type));
     }
 
+    @GuardedBy(value = "this")
     private DatastoreContext context;
+    @GuardedBy(value = "this")
     private Map<String, Object> currentProperties;
 
     public DatastoreContextIntrospector(DatastoreContext context) {
         this.context = context;
     }
 
-    public DatastoreContext getContext() {
+    public synchronized DatastoreContext getContext() {
         return context;
     }
 
index 87c8817..e736dda 100644 (file)
@@ -75,7 +75,7 @@ public class DatastoreSnapshotRestore {
             for (DatastoreSnapshot snapshot: snapshots) {
                 datastoreSnapshots.put(snapshot.getType(), snapshot);
             }
-        } catch (Exception e) {
+        } catch (ClassNotFoundException | IOException e) {
             LOG.error("Error reading clustered datastore restore file {}", restoreFile, e);
         } finally {
             if (!restoreFile.delete()) {
index cb6d44d..8bfad09 100644 (file)
@@ -98,7 +98,7 @@ public class DistributedDataStore implements DistributedDataStoreInterface, Sche
 
         ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration)
                 .datastoreContextFactory(datastoreContextFactory)
-                .waitTillReadyCountdownLatch(waitTillReadyCountDownLatch)
+                .waitTillReadyCountDownLatch(waitTillReadyCountDownLatch)
                 .primaryShardInfoCache(primaryShardInfoCache)
                 .restoreFromSnapshot(restoreFromSnapshot);
 
index df57ede..de98dce 100644 (file)
@@ -84,6 +84,7 @@ final class LocalTransactionChain extends AbstractSnapshotBackedTransactionChain
     @Override
     public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
             @Nullable Exception operationError) {
+        Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
         if (operationError != null) {
             return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx,
                     operationError);
index 935db99..d01cae8 100644 (file)
@@ -75,6 +75,7 @@ final class LocalTransactionFactoryImpl extends TransactionReadyPrototype<Transa
     @Override
     public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
             @Nullable Exception operationError) {
+        Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
         if (operationError != null) {
             return new LocalThreePhaseCommitCohort(actorContext, leader,
                     (SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
index d52d3a3..c98e11d 100644 (file)
@@ -154,9 +154,7 @@ public class Shard extends RaftActor {
                     treeChangeListenerPublisher, dataChangeListenerPublisher, name);
         }
 
-        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
-                datastoreContext.getDataStoreMXBeanType());
-        shardMBean.setShard(this);
+        shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);
 
         if (isMetricsCaptureEnabled()) {
             getContext().become(new MeteringBehavior(this));
@@ -167,7 +165,7 @@ public class Shard extends RaftActor {
         setTransactionCommitTimeout();
 
         // create a notifier actor for each cluster member
-        roleChangeNotifier = createRoleChangeNotifier(name.toString());
+        roleChangeNotifier = createRoleChangeNotifier(name);
 
         appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
                 getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
@@ -254,8 +252,7 @@ public class Shard extends RaftActor {
                 updateSchemaContext((UpdateSchemaContext) message);
             } else if (message instanceof PeerAddressResolved) {
                 PeerAddressResolved resolved = (PeerAddressResolved) message;
-                setPeerAddress(resolved.getPeerId().toString(),
-                        resolved.getPeerAddress());
+                setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
             } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
                 store.checkForExpiredTransactions(transactionCommitTimeout);
                 commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
@@ -331,12 +328,12 @@ public class Shard extends RaftActor {
 
     private void handleCommitTransaction(final CommitTransaction commit) {
         if (isLeader()) {
-            commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this);
+            commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
         } else {
             ActorSelection leader = getLeader();
             if (leader == null) {
                 messageRetrySupport.addMessageToRetry(commit, getSender(),
-                        "Could not commit transaction " + commit.getTransactionID());
+                        "Could not commit transaction " + commit.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
                 leader.forward(commit, getContext());
@@ -345,15 +342,15 @@ public class Shard extends RaftActor {
     }
 
     private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
-        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
+        LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionId());
 
         if (isLeader()) {
-            commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
+            commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
         } else {
             ActorSelection leader = getLeader();
             if (leader == null) {
                 messageRetrySupport.addMessageToRetry(canCommit, getSender(),
-                        "Could not canCommit transaction " + canCommit.getTransactionID());
+                        "Could not canCommit transaction " + canCommit.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
                 leader.forward(canCommit, getContext());
@@ -367,7 +364,7 @@ public class Shard extends RaftActor {
             commitCoordinator.handleBatchedModifications(batched, sender, this);
         } catch (Exception e) {
             LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
-                    batched.getTransactionID(), e);
+                    batched.getTransactionId(), e);
             sender.tell(new akka.actor.Status.Failure(e), getSelf());
         }
     }
@@ -392,7 +389,7 @@ public class Shard extends RaftActor {
             ActorSelection leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(batched, getSender(),
-                        "Could not commit transaction " + batched.getTransactionID());
+                        "Could not commit transaction " + batched.getTransactionId());
             } else {
                 // If this is not the first batch and leadership changed in between batched messages,
                 // we need to reconstruct previous BatchedModifications from the transaction
@@ -430,7 +427,7 @@ public class Shard extends RaftActor {
 
     @SuppressWarnings("checkstyle:IllegalCatch")
     private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
-        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());
+        LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());
 
         boolean isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
@@ -438,14 +435,14 @@ public class Shard extends RaftActor {
                 commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
             } catch (Exception e) {
                 LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
-                        message.getTransactionID(), e);
+                        message.getTransactionId(), e);
                 getSender().tell(new akka.actor.Status.Failure(e), getSelf());
             }
         } else {
             ActorSelection leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(message, getSender(),
-                        "Could not commit transaction " + message.getTransactionID());
+                        "Could not commit transaction " + message.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
                 message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
@@ -455,7 +452,7 @@ public class Shard extends RaftActor {
     }
 
     private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
-        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());
+        LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());
 
         boolean isLeaderActive = isLeaderActive();
         if (isLeader() && isLeaderActive) {
@@ -464,11 +461,11 @@ public class Shard extends RaftActor {
             ActorSelection leader = getLeader();
             if (!isLeaderActive || leader == null) {
                 messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
-                        "Could not commit transaction " + forwardedReady.getTransactionID());
+                        "Could not commit transaction " + forwardedReady.getTransactionId());
             } else {
                 LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
 
-                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
+                ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
                         forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
                 readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
                 leader.forward(readyLocal, getContext());
@@ -477,7 +474,7 @@ public class Shard extends RaftActor {
     }
 
     private void handleAbortTransaction(final AbortTransaction abort) {
-        doAbortTransaction(abort.getTransactionID(), getSender());
+        doAbortTransaction(abort.getTransactionId(), getSender());
     }
 
     void doAbortTransaction(final TransactionIdentifier transactionID, final ActorRef sender) {
index 7e3cd02..2773a3e 100644 (file)
@@ -23,6 +23,8 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
@@ -101,11 +103,11 @@ final class ShardCommitCoordinator {
     void handleForwardedReadyTransaction(final ForwardedReadyTransaction ready, final ActorRef sender,
             final Shard shard) {
         log.debug("{}: Readying transaction {}, client version {}", name,
-                ready.getTransactionID(), ready.getTxnClientVersion());
+                ready.getTransactionId(), ready.getTxnClientVersion());
 
         final ShardDataTreeCohort cohort = ready.getTransaction().ready();
         final CohortEntry cohortEntry = CohortEntry.createReady(cohort, ready.getTxnClientVersion());
-        cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+        cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
 
         if (ready.isDoImmediateCommit()) {
             cohortEntry.setDoImmediateCommit(true);
@@ -129,28 +131,28 @@ final class ShardCommitCoordinator {
      * @param sender the sender of the message
      */
     void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) {
-        CohortEntry cohortEntry = cohortCache.get(batched.getTransactionID());
+        CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId());
         if (cohortEntry == null) {
-            cohortEntry = CohortEntry.createOpen(dataTree.newReadWriteTransaction(batched.getTransactionID()),
+            cohortEntry = CohortEntry.createOpen(dataTree.newReadWriteTransaction(batched.getTransactionId()),
                 batched.getVersion());
-            cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+            cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
         }
 
         if (log.isDebugEnabled()) {
             log.debug("{}: Applying {} batched modifications for Tx {}", name,
-                    batched.getModifications().size(), batched.getTransactionID());
+                    batched.getModifications().size(), batched.getTransactionId());
         }
 
         cohortEntry.applyModifications(batched.getModifications());
 
         if (batched.isReady()) {
             if (cohortEntry.getLastBatchedModificationsException() != null) {
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 throw cohortEntry.getLastBatchedModificationsException();
             }
 
             if (cohortEntry.getTotalBatchedModificationsReceived() != batched.getTotalMessagesSent()) {
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 throw new IllegalStateException(String.format(
                         "The total number of batched messages received %d does not match the number sent %d",
                         cohortEntry.getTotalBatchedModificationsReceived(), batched.getTotalMessagesSent()));
@@ -158,7 +160,7 @@ final class ShardCommitCoordinator {
 
             if (log.isDebugEnabled()) {
                 log.debug("{}: Readying Tx {}, client version {}", name,
-                        batched.getTransactionID(), batched.getVersion());
+                        batched.getTransactionId(), batched.getVersion());
             }
 
             cohortEntry.setDoImmediateCommit(batched.isDoCommitOnReady());
@@ -186,13 +188,13 @@ final class ShardCommitCoordinator {
      * @param shard the transaction's shard actor
      */
     void handleReadyLocalTransaction(final ReadyLocalTransaction message, final ActorRef sender, final Shard shard) {
-        final ShardDataTreeCohort cohort = dataTree.createReadyCohort(message.getTransactionID(),
+        final ShardDataTreeCohort cohort = dataTree.createReadyCohort(message.getTransactionId(),
             message.getModification());
         final CohortEntry cohortEntry = CohortEntry.createReady(cohort, DataStoreVersions.CURRENT_VERSION);
-        cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+        cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
         cohortEntry.setDoImmediateCommit(message.isDoCommitOnReady());
 
-        log.debug("{}: Applying local modifications for Tx {}", name, message.getTransactionID());
+        log.debug("{}: Applying local modifications for Tx {}", name, message.getTransactionId());
 
         if (message.isDoCommitOnReady()) {
             cohortEntry.setReplySender(sender);
@@ -205,7 +207,7 @@ final class ShardCommitCoordinator {
 
     Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
             final int maxModificationsPerBatch) {
-        CohortEntry cohortEntry = cohortCache.remove(from.getTransactionID());
+        CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId());
         if (cohortEntry == null || cohortEntry.getTransaction() == null) {
             return Collections.singletonList(from);
         }
@@ -218,7 +220,7 @@ final class ShardCommitCoordinator {
             protected BatchedModifications getModifications() {
                 if (newModifications.isEmpty()
                         || newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
-                    newModifications.add(new BatchedModifications(from.getTransactionID(), from.getVersion()));
+                    newModifications.add(new BatchedModifications(from.getTransactionId(), from.getVersion()));
                 }
 
                 return newModifications.getLast();
@@ -236,7 +238,7 @@ final class ShardCommitCoordinator {
         cohortEntry.canCommit(new FutureCallback<Void>() {
             @Override
             public void onSuccess(final Void result) {
-                log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionID());
+                log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId());
 
                 if (cohortEntry.isDoImmediateCommit()) {
                     doCommit(cohortEntry);
@@ -250,9 +252,9 @@ final class ShardCommitCoordinator {
             @Override
             public void onFailure(final Throwable failure) {
                 log.debug("{}: An exception occurred during canCommit for {}: {}", name,
-                        cohortEntry.getTransactionID(), failure);
+                        cohortEntry.getTransactionId(), failure);
 
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
             }
         });
@@ -286,7 +288,7 @@ final class ShardCommitCoordinator {
     }
 
     private void doCommit(final CohortEntry cohortEntry) {
-        log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionID());
+        log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionId());
 
         // We perform the preCommit phase here atomically with the commit phase. This is an
         // optimization to eliminate the overhead of an extra preCommit message. We lose front-end
@@ -301,25 +303,25 @@ final class ShardCommitCoordinator {
             @Override
             public void onFailure(final Throwable failure) {
                 log.error("{} An exception occurred while preCommitting transaction {}", name,
-                        cohortEntry.getTransactionID(), failure);
+                        cohortEntry.getTransactionId(), failure);
 
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
             }
         });
     }
 
     private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final CohortEntry cohortEntry) {
-        log.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
+        log.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionId());
 
         cohortEntry.commit(new FutureCallback<UnsignedLong>() {
             @Override
             public void onSuccess(final UnsignedLong result) {
-                final TransactionIdentifier txId = cohortEntry.getTransactionID();
+                final TransactionIdentifier txId = cohortEntry.getTransactionId();
                 log.debug("{}: Transaction {} committed as {}, sending response to {}", persistenceId(), txId, result,
                     sender);
 
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
                     cohortEntry.getShard().self());
             }
@@ -327,9 +329,9 @@ final class ShardCommitCoordinator {
             @Override
             public void onFailure(final Throwable failure) {
                 log.error("{}, An exception occurred while committing transaction {}", persistenceId(),
-                        cohortEntry.getTransactionID(), failure);
+                        cohortEntry.getTransactionId(), failure);
 
-                cohortCache.remove(cohortEntry.getTransactionID());
+                cohortCache.remove(cohortEntry.getTransactionId());
                 sender.tell(new Failure(failure), cohortEntry.getShard().self());
             }
         });
@@ -376,7 +378,7 @@ final class ShardCommitCoordinator {
             if (sender != null) {
                 sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
             }
-        } catch (Exception e) {
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
             log.error("{}: An exception happened during abort", name, e);
 
             if (sender != null) {
@@ -434,7 +436,7 @@ final class ShardCommitCoordinator {
                     }
 
                     // Allocate a new message
-                    final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionID(),
+                    final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(),
                         cohortEntry.getClientVersion());
                     newMessages.add(ret);
                     return ret;
@@ -454,12 +456,12 @@ final class ShardCommitCoordinator {
                     switch (cohort.getState()) {
                         case CAN_COMMIT_COMPLETE:
                         case CAN_COMMIT_PENDING:
-                            messages.add(new CanCommitTransaction(cohortEntry.getTransactionID(),
+                            messages.add(new CanCommitTransaction(cohortEntry.getTransactionId(),
                                 cohortEntry.getClientVersion()));
                             break;
                         case PRE_COMMIT_COMPLETE:
                         case PRE_COMMIT_PENDING:
-                            messages.add(new CommitTransaction(cohortEntry.getTransactionID(),
+                            messages.add(new CommitTransaction(cohortEntry.getTransactionId(),
                                 cohortEntry.getClientVersion()));
                             break;
                         default:
index 5a7ce80..7f34760 100644 (file)
@@ -18,6 +18,7 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.primitives.UnsignedLong;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.File;
 import java.io.IOException;
 import java.util.AbstractMap.SimpleEntry;
@@ -675,6 +676,8 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         return cohort;
     }
 
+    @SuppressFBWarnings(value = {"RV_RETURN_VALUE_IGNORED", "DB_DUPLICATE_SWITCH_CLAUSES"},
+            justification = "See inline comments below.")
     void checkForExpiredTransactions(final long transactionCommitTimeoutMillis) {
         final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
         final long now = shard.ticker().read();
@@ -688,6 +691,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                     pendingTransactions.poll().cohort.failedCanCommit(new TimeoutException());
                     break;
                 case CAN_COMMIT_COMPLETE:
+                    // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
+                    // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
+                    // in PRE_COMMIT_COMPLETE is changed.
                     pendingTransactions.poll().cohort.reportFailure(new TimeoutException());
                     break;
                 case PRE_COMMIT_PENDING:
@@ -724,6 +730,9 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
                 case FAILED:
                 case READY:
                 default:
+                    // The suppression of the FindBugs "RV_RETURN_VALUE_IGNORED" warning pertains to this line. In
+                    // this case, we just want to drop the current entry that expired and thus ignore the return value.
+                    // In fact we really shouldn't hit this case but we handle all enums for completeness.
                     pendingTransactions.poll();
             }
 
@@ -733,6 +742,7 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
         }
     }
 
+    @SuppressFBWarnings(value = "RV_RETURN_VALUE_IGNORED", justification = "See inline comment below.")
     void startAbort(final SimpleShardDataTreeCohort cohort) {
         final Iterator<CommitEntry> it = pendingTransactions.iterator();
         if (!it.hasNext()) {
@@ -746,6 +756,10 @@ public class ShardDataTree extends ShardDataTreeTransactionParent {
             if (cohort.getState() != State.COMMIT_PENDING) {
                 LOG.debug("{}: aborted head of queue {} in state {}", logContext, cohort.getIdentifier(),
                     cohort.getIdentifier());
+
+                // The suppression of the FindBugs "RV_RETURN_VALUE_IGNORED" warning pertains to this line. In
+                // this case, we've already obtained the head of the queue above via the Iterator and we just want to
+                // remove it here.
                 pendingTransactions.poll();
                 processNextTransaction();
             } else {
index 2b109b0..e3fbb82 100644 (file)
@@ -15,6 +15,7 @@ import akka.actor.ReceiveTimeout;
 import akka.japi.Creator;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
@@ -34,13 +35,13 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
     private final ActorRef shardActor;
     private final ShardStats shardStats;
-    private final TransactionIdentifier transactionID;
+    private final TransactionIdentifier transactionId;
 
-    protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, TransactionIdentifier transactionID) {
+    protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, TransactionIdentifier transactionId) {
         super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
         this.shardActor = shardActor;
         this.shardStats = shardStats;
-        this.transactionID = Preconditions.checkNotNull(transactionID);
+        this.transactionId = Preconditions.checkNotNull(transactionId);
     }
 
     public static Props props(TransactionType type, AbstractShardDataTreeTransaction<?> transaction,
@@ -54,8 +55,8 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
         return shardActor;
     }
 
-    protected final TransactionIdentifier getTransactionID() {
-        return transactionID;
+    protected final TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
     @Override
@@ -63,7 +64,7 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
         if (CloseTransaction.isSerializedType(message)) {
             closeTransaction(true);
         } else if (message instanceof ReceiveTimeout) {
-            LOG.debug("Got ReceiveTimeout for inactivity - closing transaction {}", transactionID);
+            LOG.debug("Got ReceiveTimeout for inactivity - closing transaction {}", transactionId);
             closeTransaction(false);
         } else {
             unknownMessage(message);
@@ -115,6 +116,8 @@ public abstract class ShardTransaction extends AbstractUntypedActorWithMetering
         getSender().tell(new DataExistsReply(exists, message.getVersion()).toSerializable(), getSelf());
     }
 
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't "
+            + "create remote instances of this actor and thus don't need it to be Serializable.")
     private static class ShardTransactionCreator implements Creator<ShardTransaction> {
 
         private static final long serialVersionUID = 1L;
index ee61090..9c304ec 100644 (file)
@@ -110,7 +110,7 @@ public class ShardWriteTransaction extends ShardTransaction {
     }
 
     private void readyTransaction(boolean returnSerialized, boolean doImmediateCommit, short clientTxVersion) {
-        TransactionIdentifier transactionID = getTransactionID();
+        TransactionIdentifier transactionID = getTransactionId();
 
         LOG.debug("readyTransaction : {}", transactionID);
 
index 71eaf7d..7da174f 100644 (file)
@@ -20,7 +20,6 @@ import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
@@ -28,7 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
 
-final class SimpleShardDataTreeCohort extends ShardDataTreeCohort implements Identifiable<TransactionIdentifier> {
+final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
     private static final Logger LOG = LoggerFactory.getLogger(SimpleShardDataTreeCohort.class);
     private static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
     private final DataTreeModification transaction;
index 17017b9..8f62716 100644 (file)
@@ -87,11 +87,12 @@ public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort<
         }
 
         final AtomicInteger completed = new AtomicInteger(cohorts.size());
+        final Object lock = new Object();
         for (final CohortInfo info: cohorts) {
             info.getActorFuture().onComplete(new OnComplete<ActorSelection>() {
                 @Override
                 public void onComplete(Throwable failure, ActorSelection actor)  {
-                    synchronized (completed) {
+                    synchronized (lock) {
                         boolean done = completed.decrementAndGet() == 0;
                         if (failure != null) {
                             LOG.debug("Tx {}: a cohort Future failed", transactionId, failure);
index 06ae6ec..2038495 100644 (file)
@@ -59,8 +59,8 @@ public class ConfigurationImpl implements Configuration {
     private static Map<String, String> createNamespaceToModuleName(Iterable<ModuleConfig> moduleConfigs) {
         final ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
         for (ModuleConfig moduleConfig : moduleConfigs) {
-            if (moduleConfig.getNameSpace() != null) {
-                builder.put(moduleConfig.getNameSpace(), moduleConfig.getName());
+            if (moduleConfig.getNamespace() != null) {
+                builder.put(moduleConfig.getNamespace(), moduleConfig.getName());
             }
         }
 
@@ -149,7 +149,7 @@ public class ConfigurationImpl implements Configuration {
         updateModuleConfigMap(moduleConfig);
 
         namespaceToModuleName = ImmutableMap.<String, String>builder().putAll(namespaceToModuleName)
-                .put(moduleConfig.getNameSpace(), moduleConfig.getName()).build();
+                .put(moduleConfig.getNamespace(), moduleConfig.getName()).build();
         allShardNames = ImmutableSet.<String>builder().addAll(allShardNames).add(config.getShardName()).build();
     }
 
index d5094a5..353c80c 100644 (file)
@@ -24,14 +24,14 @@ import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy
  */
 public class ModuleConfig {
     private final String name;
-    private final String nameSpace;
+    private final String namespace;
     private final ShardStrategy shardStrategy;
     private final Map<String, ShardConfig> shardConfigs;
 
-    private ModuleConfig(String name, String nameSpace, ShardStrategy shardStrategy,
+    private ModuleConfig(String name, String namespace, ShardStrategy shardStrategy,
             Map<String, ShardConfig> shardConfigs) {
         this.name = name;
-        this.nameSpace = nameSpace;
+        this.namespace = namespace;
         this.shardStrategy = shardStrategy;
         this.shardConfigs = shardConfigs;
     }
@@ -42,8 +42,8 @@ public class ModuleConfig {
     }
 
     @Nullable
-    public String getNameSpace() {
-        return nameSpace;
+    public String getNamespace() {
+        return namespace;
     }
 
     @Nullable
@@ -86,7 +86,7 @@ public class ModuleConfig {
 
         private Builder(ModuleConfig moduleConfig) {
             this.name = moduleConfig.getName();
-            this.nameSpace = moduleConfig.getNameSpace();
+            this.nameSpace = moduleConfig.getNamespace();
             this.shardStrategy = moduleConfig.getShardStrategy();
             for (ShardConfig shardConfig : moduleConfig.getShardConfigs()) {
                 shardConfigs.put(shardConfig.getName(), shardConfig);
index a2c7601..beee118 100644 (file)
@@ -113,7 +113,7 @@ public class DistributedEntityOwnershipService implements DOMEntityOwnershipServ
                 if (failure != null) {
                     LOG.debug("Error sending message {} to {}", message, shardActor, failure);
                 } else {
-                    LOG.debug("{} message to {} succeeded", message, shardActor, failure);
+                    LOG.debug("{} message to {} succeeded", message, shardActor);
                 }
             }
         }, context.getClientDispatcher());
index 450a675..fbffd5b 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore.entityownership;
 import akka.actor.Props;
 import akka.japi.Creator;
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
 import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
@@ -53,6 +54,8 @@ class EntityOwnershipListenerActor extends AbstractUntypedActor {
     private static final class EntityOwnershipListenerCreator implements Creator<EntityOwnershipListenerActor> {
         private static final long serialVersionUID = 1L;
 
+        @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+                + "create remote instances of this actor and thus don't need it to be Serializable.")
         private final DOMEntityOwnershipListener listener;
 
         EntityOwnershipListenerCreator(DOMEntityOwnershipListener listener) {
index e0bd7f1..47de119 100644 (file)
@@ -235,7 +235,7 @@ class EntityOwnershipShard extends Shard {
     void tryCommitModifications(final BatchedModifications modifications) {
         if (isLeader()) {
             LOG.debug("{}: Committing BatchedModifications {} locally", persistenceId(),
-                    modifications.getTransactionID());
+                    modifications.getTransactionId());
 
             // Note that it's possible the commit won't get consensus and will timeout and not be applied
             // to the state. However we don't need to retry it in that case b/c it will be committed to
@@ -248,7 +248,7 @@ class EntityOwnershipShard extends Shard {
                 possiblyRemoveAllInitialCandidates(leader);
 
                 LOG.debug("{}: Sending BatchedModifications {} to leader {}", persistenceId(),
-                        modifications.getTransactionID(), leader);
+                        modifications.getTransactionId(), leader);
 
                 Future<Object> future = Patterns.ask(leader, modifications, TimeUnit.SECONDS.toMillis(
                         getDatastoreContext().getShardTransactionCommitTimeoutInSeconds()));
index 621354d..9c65ed7 100644 (file)
@@ -84,7 +84,7 @@ class EntityOwnershipShardCommitCoordinator {
         }
 
         if (shard.hasLeader()) {
-            log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionID());
+            log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionId());
 
             shard.tryCommitModifications(inflightCommit);
         } else {
@@ -98,7 +98,7 @@ class EntityOwnershipShardCommitCoordinator {
             return;
         }
 
-        log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionID(), cause);
+        log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionId(), cause);
 
         if (!(cause instanceof NoShardLeaderException)) {
             // If the failure is other than NoShardLeaderException the commit may have been partially
@@ -113,7 +113,7 @@ class EntityOwnershipShardCommitCoordinator {
         FiniteDuration duration = shard.getDatastoreContext().getShardRaftConfig().getElectionTimeOutInterval();
 
         log.debug("Scheduling retry for BatchedModifications commit {} in {}",
-                inflightCommit.getTransactionID(), duration);
+                inflightCommit.getTransactionId(), duration);
 
         retryCommitSchedule = shard.getContext().system().scheduler().scheduleOnce(duration, shard.getSelf(),
                 COMMIT_RETRY_MESSAGE, shard.getContext().dispatcher(), ActorRef.noSender());
@@ -129,7 +129,7 @@ class EntityOwnershipShardCommitCoordinator {
             retryCommitSchedule.cancel();
         }
 
-        log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionID());
+        log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionId());
 
         inflightCommit = null;
         commitNextBatch(shard);
@@ -151,7 +151,7 @@ class EntityOwnershipShardCommitCoordinator {
             }
         }
 
-        log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionID(),
+        log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionId(),
                 inflightCommit.getModifications().size());
 
         shard.tryCommitModifications(inflightCommit);
@@ -233,7 +233,7 @@ class EntityOwnershipShardCommitCoordinator {
 
     @Nullable
     private BatchedModifications pruneModifications(BatchedModifications toPrune) {
-        BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionID(),
+        BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionId(),
                 toPrune.getVersion());
         prunedModifications.setDoCommitOnReady(toPrune.isDoCommitOnReady());
         prunedModifications.setReady(toPrune.isReady());
index b42926d..08632a8 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
 
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.datastore.Shard;
 
 /**
  * Factory for creating ShardStats mbeans.
@@ -15,9 +17,10 @@ package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
  */
 public class ShardMBeanFactory {
 
-    public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType) {
+    public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType,
+            @Nonnull final Shard shard) {
         String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
-        ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType);
+        ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
         shardStatsMBeanImpl.registerMBean();
         return shardStatsMBeanImpl;
     }
index 6e8ff14..e5af6dd 100644 (file)
@@ -21,6 +21,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.Nullable;
 import org.opendaylight.controller.cluster.datastore.Shard;
 import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
@@ -35,7 +36,7 @@ import scala.concurrent.Await;
  * @author  Basheeruddin syedbahm@cisco.com
  */
 public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
-    public static String JMX_CATEGORY_SHARD = "Shards";
+    public static final String JMX_CATEGORY_SHARD = "Shards";
 
     private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
 
@@ -60,7 +61,7 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     private boolean followerInitialSyncStatus = false;
 
-    private Shard shard;
+    private final Shard shard;
 
     private String statRetrievalError;
 
@@ -70,11 +71,8 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     private long lastLeadershipChangeTime;
 
-    public ShardStats(final String shardName, final String mxBeanType) {
+    public ShardStats(final String shardName, final String mxBeanType, @Nullable final Shard shard) {
         super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
-    }
-
-    public void setShard(Shard shard) {
         this.shard = shard;
     }
 
@@ -214,7 +212,9 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     @Override
     public String getLastCommittedTransactionTime() {
-        return DATE_FORMAT.format(new Date(lastCommittedTransactionTime));
+        synchronized (DATE_FORMAT) {
+            return DATE_FORMAT.format(new Date(lastCommittedTransactionTime));
+        }
     }
 
     @Override
@@ -344,17 +344,19 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     @Override
     public String getLastLeadershipChangeTime() {
-        return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+        synchronized (DATE_FORMAT) {
+            return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+        }
     }
 
     @Override
     public int getPendingTxCommitQueueSize() {
-        return shard.getPendingTxCommitQueueSize();
+        return shard != null ? shard.getPendingTxCommitQueueSize() : -1;
     }
 
     @Override
     public int getTxCohortCacheSize() {
-        return shard.getCohortCacheSize();
+        return shard != null ? shard.getCohortCacheSize() : -1;
     }
 
     @Override
index e69bc95..ba6b64f 100644 (file)
@@ -21,34 +21,34 @@ import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier
 public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage {
     private static final long serialVersionUID = 1L;
 
-    private TransactionIdentifier transactionID;
+    private TransactionIdentifier transactionId;
 
     protected AbstractThreePhaseCommitMessage() {
     }
 
-    protected AbstractThreePhaseCommitMessage(final TransactionIdentifier transactionID, final short version) {
+    protected AbstractThreePhaseCommitMessage(final TransactionIdentifier transactionId, final short version) {
         super(version);
-        this.transactionID = Preconditions.checkNotNull(transactionID);
+        this.transactionId = Preconditions.checkNotNull(transactionId);
     }
 
-    public TransactionIdentifier getTransactionID() {
-        return transactionID;
+    public TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
     @Override
     public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
-        transactionID = TransactionIdentifier.readFrom(in);
+        transactionId = TransactionIdentifier.readFrom(in);
     }
 
     @Override
     public void writeExternal(ObjectOutput out) throws IOException {
         super.writeExternal(out);
-        transactionID.writeTo(out);
+        transactionId.writeTo(out);
     }
 
     @Override
     public String toString() {
-        return getClass().getSimpleName() + " [transactionID=" + transactionID + ", version=" + getVersion() + "]";
+        return getClass().getSimpleName() + " [transactionId=" + transactionId + ", version=" + getVersion() + "]";
     }
 }
index 0ab93eb..da87a08 100644 (file)
@@ -25,14 +25,14 @@ public class BatchedModifications extends MutableCompositeModification {
     private boolean ready;
     private boolean doCommitOnReady;
     private int totalMessagesSent;
-    private TransactionIdentifier transactionID;
+    private TransactionIdentifier transactionId;
 
     public BatchedModifications() {
     }
 
-    public BatchedModifications(TransactionIdentifier transactionID, short version) {
+    public BatchedModifications(TransactionIdentifier transactionId, short version) {
         super(version);
-        this.transactionID = Preconditions.checkNotNull(transactionID, "transactionID can't be null");
+        this.transactionId = Preconditions.checkNotNull(transactionId, "transactionId can't be null");
     }
 
     public boolean isReady() {
@@ -59,15 +59,15 @@ public class BatchedModifications extends MutableCompositeModification {
         this.totalMessagesSent = totalMessagesSent;
     }
 
-    public TransactionIdentifier getTransactionID() {
-        return transactionID;
+    public TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
 
     @Override
     public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
         super.readExternal(in);
-        transactionID = TransactionIdentifier.readFrom(in);
+        transactionId = TransactionIdentifier.readFrom(in);
         ready = in.readBoolean();
         totalMessagesSent = in.readInt();
         doCommitOnReady = in.readBoolean();
@@ -76,7 +76,7 @@ public class BatchedModifications extends MutableCompositeModification {
     @Override
     public void writeExternal(ObjectOutput out) throws IOException {
         super.writeExternal(out);
-        transactionID.writeTo(out);
+        transactionId.writeTo(out);
         out.writeBoolean(ready);
         out.writeInt(totalMessagesSent);
         out.writeBoolean(doCommitOnReady);
@@ -85,7 +85,7 @@ public class BatchedModifications extends MutableCompositeModification {
     @Override
     public String toString() {
         StringBuilder builder = new StringBuilder();
-        builder.append("BatchedModifications [transactionID=").append(transactionID).append(", ready=").append(ready)
+        builder.append("BatchedModifications [transactionId=").append(transactionId).append(", ready=").append(ready)
             .append(", totalMessagesSent=").append(totalMessagesSent).append(", modifications size=")
             .append(getModifications().size()).append("]");
         return builder.toString();
index 54c0d74..23ad0f9 100644 (file)
@@ -8,6 +8,7 @@
 package org.opendaylight.controller.cluster.datastore.messages;
 
 import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Serializable;
 import java.util.List;
 import javax.annotation.Nonnull;
@@ -25,6 +26,9 @@ public class DatastoreSnapshot implements Serializable {
     private final byte[] shardManagerSnapshot;
     private final List<ShardSnapshot> shardSnapshots;
 
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] "
+            + "object but this is OK since this class is merely a DTO and does not process byte[] internally. "
+            + "Also it would be inefficient to create a return copy as the byte[] could be large.")
     public DatastoreSnapshot(@Nonnull String type, @Nullable byte[] shardManagerSnapshot,
             @Nonnull List<ShardSnapshot> shardSnapshots) {
         this.type = Preconditions.checkNotNull(type);
@@ -37,6 +41,9 @@ public class DatastoreSnapshot implements Serializable {
         return type;
     }
 
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
+            + "this is OK since this class is merely a DTO and does not process byte[] internally. "
+            + "Also it would be inefficient to create a return copy as the byte[] could be large.")
     @Nullable
     public byte[] getShardManagerSnapshot() {
         return shardManagerSnapshot;
@@ -63,6 +70,9 @@ public class DatastoreSnapshot implements Serializable {
             return name;
         }
 
+        @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
+                + "this is OK since this class is merely a DTO and does not process byte[] internally. "
+                + "Also it would be inefficient to create a return copy as the byte[] could be large.")
         @Nonnull
         public byte[] getSnapshot() {
             return snapshot;
index 08784d1..9cd5e66 100644 (file)
@@ -17,21 +17,21 @@ import org.opendaylight.controller.cluster.datastore.ReadWriteShardDataTreeTrans
  * @author Thomas Pantelis
  */
 public class ForwardedReadyTransaction {
-    private final TransactionIdentifier transactionID;
+    private final TransactionIdentifier transactionId;
     private final ReadWriteShardDataTreeTransaction transaction;
     private final boolean doImmediateCommit;
     private final short txnClientVersion;
 
-    public ForwardedReadyTransaction(TransactionIdentifier transactionID, short txnClientVersion,
+    public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion,
             ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit) {
-        this.transactionID = Preconditions.checkNotNull(transactionID);
+        this.transactionId = Preconditions.checkNotNull(transactionId);
         this.transaction = Preconditions.checkNotNull(transaction);
         this.txnClientVersion = txnClientVersion;
         this.doImmediateCommit = doImmediateCommit;
     }
 
-    public TransactionIdentifier getTransactionID() {
-        return transactionID;
+    public TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
     public ReadWriteShardDataTreeTransaction getTransaction() {
@@ -48,7 +48,7 @@ public class ForwardedReadyTransaction {
 
     @Override
     public String toString() {
-        return "ForwardedReadyTransaction [transactionID=" + transactionID + ", doImmediateCommit=" + doImmediateCommit
+        return "ForwardedReadyTransaction [transactionId=" + transactionId + ", doImmediateCommit=" + doImmediateCommit
                 + ", txnClientVersion=" + txnClientVersion + "]";
     }
 }
index 9556087..2664fc1 100644 (file)
@@ -21,21 +21,21 @@ import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification
  */
 public final class ReadyLocalTransaction {
     private final DataTreeModification modification;
-    private final TransactionIdentifier transactionID;
+    private final TransactionIdentifier transactionId;
     private final boolean doCommitOnReady;
 
     // The version of the remote system used only when needing to convert to BatchedModifications.
     private short remoteVersion = DataStoreVersions.CURRENT_VERSION;
 
-    public ReadyLocalTransaction(final TransactionIdentifier transactionID, final DataTreeModification modification,
+    public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification,
             final boolean doCommitOnReady) {
-        this.transactionID = Preconditions.checkNotNull(transactionID);
+        this.transactionId = Preconditions.checkNotNull(transactionId);
         this.modification = Preconditions.checkNotNull(modification);
         this.doCommitOnReady = doCommitOnReady;
     }
 
-    public TransactionIdentifier getTransactionID() {
-        return transactionID;
+    public TransactionIdentifier getTransactionId() {
+        return transactionId;
     }
 
     public DataTreeModification getModification() {
index b3254f5..d095cbd 100644 (file)
@@ -33,7 +33,7 @@ public final class ReadyLocalTransactionSerializer extends JSerializer {
     public byte[] toBinary(final Object obj) {
         Preconditions.checkArgument(obj instanceof ReadyLocalTransaction, "Unsupported object type %s", obj.getClass());
         final ReadyLocalTransaction readyLocal = (ReadyLocalTransaction) obj;
-        final BatchedModifications batched = new BatchedModifications(readyLocal.getTransactionID(),
+        final BatchedModifications batched = new BatchedModifications(readyLocal.getTransactionId(),
                 readyLocal.getRemoteVersion());
         batched.setDoCommitOnReady(readyLocal.isDoCommitOnReady());
         batched.setTotalMessagesSent(1);
index 2741ca0..c9c0c0c 100644 (file)
@@ -26,6 +26,10 @@ public final class RegisterDataTreeChangeListener implements Externalizable, Lis
     private YangInstanceIdentifier path;
     private boolean registerOnAllInstances;
 
+    public RegisterDataTreeChangeListener() {
+        // For Externalizable
+    }
+
     public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath,
             final boolean registerOnAllInstances) {
         this.path = Preconditions.checkNotNull(path);
index 4ad8260..86e6f3a 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.cluster.datastore.persisted;
 
 import com.google.common.base.MoreObjects;
 import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -61,6 +62,9 @@ public final class FrontendShardDataTreeSnapshotMetadata extends
 
     private static final long serialVersionUID = 1L;
 
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+            + "aren't serialized. FindBugs does not recognize this.")
     private final List<FrontendClientMetadata> clients;
 
     public FrontendShardDataTreeSnapshotMetadata(final Collection<FrontendClientMetadata> clients) {
index 985314c..5cabd71 100644 (file)
@@ -12,6 +12,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Verify;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import java.io.Externalizable;
 import java.io.IOException;
 import java.io.ObjectInput;
@@ -88,7 +89,12 @@ public final class MetadataShardDataTreeSnapshot extends AbstractVersionedShardD
 
     private static final long serialVersionUID = 1L;
 
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+            + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+            + "aren't serialized. FindBugs does not recognize this.")
     private final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
+
+    @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "See above justification.")
     private final NormalizedNode<?, ?> rootNode;
 
     public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode) {
index 9ee1820..72d182d 100644 (file)
@@ -20,7 +20,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
     private ClusterWrapper cluster;
     private Configuration configuration;
     private DatastoreContextFactory datastoreContextFactory;
-    private CountDownLatch waitTillReadyCountdownLatch;
+    private CountDownLatch waitTillReadyCountDownLatch;
     private PrimaryShardInfoFutureCache primaryShardInfoCache;
     private DatastoreSnapshot restoreFromSnapshot;
     private volatile boolean sealed;
@@ -68,13 +68,13 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         return self();
     }
 
-    CountDownLatch getWaitTillReadyCountdownLatch() {
-        return waitTillReadyCountdownLatch;
+    CountDownLatch getWaitTillReadyCountDownLatch() {
+        return waitTillReadyCountDownLatch;
     }
 
-    public T waitTillReadyCountdownLatch(CountDownLatch newWaitTillReadyCountdownLatch) {
+    public T waitTillReadyCountDownLatch(CountDownLatch newWaitTillReadyCountDownLatch) {
         checkSealed();
-        this.waitTillReadyCountdownLatch = newWaitTillReadyCountdownLatch;
+        this.waitTillReadyCountDownLatch = newWaitTillReadyCountDownLatch;
         return self();
     }
 
@@ -103,7 +103,7 @@ public abstract class AbstractShardManagerCreator<T extends AbstractShardManager
         Preconditions.checkNotNull(cluster, "cluster should not be null");
         Preconditions.checkNotNull(configuration, "configuration should not be null");
         Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
-        Preconditions.checkNotNull(waitTillReadyCountdownLatch, "waitTillReadyCountdownLatch should not be null");
+        Preconditions.checkNotNull(waitTillReadyCountDownLatch, "waitTillReadyCountDownLatch should not be null");
         Preconditions.checkNotNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
     }
 
index 7653aea..bbf5cf3 100644 (file)
@@ -34,6 +34,7 @@ import akka.util.Timeout;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import java.io.ByteArrayInputStream;
+import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -160,7 +161,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
         this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
         this.shardDispatcherPath =
                 new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
-        this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountdownLatch();
+        this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
         this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
         this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
 
@@ -655,7 +656,7 @@ class ShardManager extends AbstractUntypedPersistentActorWithMetering {
                 LOG.debug("{}: Deserialized restored ShardManagerSnapshot: {}", persistenceId(), snapshot);
 
                 applyShardManagerSnapshot(snapshot);
-            } catch (Exception e) {
+            } catch (ClassNotFoundException | IOException e) {
                 LOG.error("{}: Error deserializing restored ShardManagerSnapshot", persistenceId(), e);
             }
         }
index 84717e1..dde7e60 100644 (file)
@@ -12,6 +12,7 @@ import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import javax.xml.stream.XMLStreamException;
 import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
@@ -35,7 +36,7 @@ public final class DataTreeModificationOutput {
     public static void toFile(File file, DataTreeModification modification) {
         try (FileOutputStream outStream = new FileOutputStream(file)) {
             modification.applyToCursor(new DataTreeModificationOutputCursor(new DataOutputStream(outStream)));
-        } catch (Exception e) {
+        } catch (IOException | RuntimeException e) {
             LOG.error("Error writing DataTreeModification to file {}", file, e);
         }
     }
@@ -50,8 +51,8 @@ public final class DataTreeModificationOutput {
         @Override
         public void delete(PathArgument child) {
             try {
-                output.write("\nDELETE -> ".getBytes());
-                output.write(current().node(child).toString().getBytes());
+                output.write("\nDELETE -> ".getBytes(StandardCharsets.UTF_8));
+                output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
                 output.writeByte('\n');
             } catch (IOException e) {
                 Throwables.propagate(e);
@@ -71,10 +72,10 @@ public final class DataTreeModificationOutput {
         private void outputPathAndNode(String name, PathArgument child, NormalizedNode<?, ?> data) {
             try {
                 output.writeByte('\n');
-                output.write(name.getBytes());
-                output.write(" -> ".getBytes());
-                output.write(current().node(child).toString().getBytes());
-                output.write(": \n".getBytes());
+                output.write(name.getBytes(StandardCharsets.UTF_8));
+                output.write(" -> ".getBytes(StandardCharsets.UTF_8));
+                output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
+                output.write(": \n".getBytes(StandardCharsets.UTF_8));
                 NormalizedNodeXMLOutput.toStream(output, data);
                 output.writeByte('\n');
             } catch (IOException | XMLStreamException e) {
index 585218f..95c37f5 100644 (file)
@@ -12,6 +12,8 @@ import akka.actor.Props;
 import akka.osgi.BundleDelegatingClassLoader;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.ActorSystemProvider;
 import org.opendaylight.controller.cluster.ActorSystemProviderListener;
@@ -40,8 +42,9 @@ public class ActorSystemProviderImpl implements ActorSystemProvider, AutoCloseab
 
         final Bundle bundle = bundleContext.getBundle();
 
-        final BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundle, Thread.currentThread()
-                .getContextClassLoader());
+        final BundleDelegatingClassLoader classLoader = AccessController.doPrivileged(
+            (PrivilegedAction<BundleDelegatingClassLoader>) () ->
+                new BundleDelegatingClassLoader(bundle, Thread.currentThread().getContextClassLoader()));
 
         final AkkaConfigurationReader configurationReader = new FileAkkaConfigurationReader();
         final Config akkaConfig = ConfigFactory.load(configurationReader.read()).getConfig(CONFIGURATION_NAME);
index beff52d..6ccede2 100644 (file)
@@ -47,7 +47,7 @@ public class ShardTransactionFailureTest extends AbstractActorTest {
 
     private final DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
 
-    private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
+    private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore", null);
 
     private ActorRef createShard() {
         ActorRef shard = getSystem().actorOf(Shard.builder().id(SHARD_IDENTIFIER).datastoreContext(datastoreContext)
index fd81c67..0e6cf53 100644 (file)
@@ -343,7 +343,7 @@ public class ThreePhaseCommitCohortProxyTest extends AbstractActorTest {
             try {
                 assertNotNull("Unexpected " + name, expType);
                 assertEquals(name + " type", expType, rawMessage.getClass());
-                assertEquals(name + " transactionId", builder.transactionId, actualMessage.getTransactionID());
+                assertEquals(name + " transactionId", builder.transactionId, actualMessage.getTransactionId());
 
                 if (reply instanceof Throwable) {
                     getSender().tell(new akka.actor.Status.Failure((Throwable)reply), self());
index f048fab..40903cb 100644 (file)
@@ -26,7 +26,7 @@ public class ShardStatsTest {
     @Before
     public void setUp() throws Exception {
 
-        shardStats = new ShardStats("shard-1", "DataStore");
+        shardStats = new ShardStats("shard-1", "DataStore", null);
         shardStats.registerMBean();
         mbeanServer = ManagementFactory.getPlatformMBeanServer();
         String objectName = AbstractMXBean.BASE_JMX_PREFIX + "type=" + shardStats.getMBeanType() + ",Category="
index 208e9ab..bf6146f 100644 (file)
@@ -33,7 +33,7 @@ public class AbortTransactionTest {
 
         AbortTransaction actual = AbortTransaction.fromSerializable(
                 SerializationUtils.clone((Serializable) serialized));
-        assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+        assertEquals("getTransactionID", expected.getTransactionId(), actual.getTransactionId());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
     }
 
index 3bb8ef2..1d4cc3d 100644 (file)
@@ -56,7 +56,7 @@ public class BatchedModificationsTest extends AbstractTest {
                 (Serializable) batched.toSerializable());
 
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
-        assertEquals("getTransactionID", tx1, clone.getTransactionID());
+        assertEquals("getTransactionID", tx1, clone.getTransactionId());
         assertEquals("isReady", true, clone.isReady());
         assertEquals("getTotalMessagesSent", 5, clone.getTotalMessagesSent());
 
@@ -83,7 +83,7 @@ public class BatchedModificationsTest extends AbstractTest {
         clone = (BatchedModifications) SerializationUtils.clone((Serializable) batched.toSerializable());
 
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
-        assertEquals("getTransactionID", tx2, clone.getTransactionID());
+        assertEquals("getTransactionID", tx2, clone.getTransactionId());
         assertEquals("isReady", false, clone.isReady());
 
         assertEquals("getModifications size", 0, clone.getModifications().size());
index 5c2f317..0449a29 100644 (file)
@@ -32,7 +32,7 @@ public class CanCommitTransactionTest extends AbstractTest {
 
         CanCommitTransaction actual = CanCommitTransaction.fromSerializable(
                 SerializationUtils.clone((Serializable) serialized));
-        assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+        assertEquals("getTransactionID", expected.getTransactionId(), actual.getTransactionId());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
     }
 
index 71dd766..ca3b2be 100644 (file)
@@ -31,7 +31,7 @@ public class CommitTransactionTest extends AbstractTest {
 
         CommitTransaction actual = CommitTransaction.fromSerializable(
                 SerializationUtils.clone((Serializable) serialized));
-        assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+        assertEquals("getTransactionID", expected.getTransactionId(), actual.getTransactionId());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
     }
 
index 3ff8163..3546a50 100644 (file)
@@ -57,7 +57,7 @@ public class ReadyLocalTransactionSerializerTest extends AbstractTest {
         assertNotNull("fromBinary returned null", deserialized);
         assertEquals("fromBinary return type", BatchedModifications.class, deserialized.getClass());
         BatchedModifications batched = (BatchedModifications)deserialized;
-        assertEquals("getTransactionID", txId, batched.getTransactionID());
+        assertEquals("getTransactionID", txId, batched.getTransactionId());
         assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, batched.getVersion());
 
         List<Modification> batchedMods = batched.getModifications();
index ad591a2..705f3c4 100644 (file)
@@ -2313,7 +2313,7 @@ public class ShardManagerTest extends AbstractActorTest {
 
         AbstractGenericCreator(Class<C> shardManagerClass) {
             this.shardManagerClass = shardManagerClass;
-            cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).waitTillReadyCountdownLatch(ready)
+            cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).waitTillReadyCountDownLatch(ready)
                     .primaryShardInfoCache(new PrimaryShardInfoFutureCache());
         }