Several warnings were suppressed via annotation with justification provided.
Other warnings that were fixed:
- remove redundant implements in several classes
- "The referenced methods have names that differ only by capitalization"
warnings. This is checked across all classes for consistency. The main
offender was getTransactionID vs. getTransactionId. I changed all methods
to getTransactionId and associated fields to transactionId.
- unsynchronized access to a field where access is synchronized
elsewhere (in DataTreeChangeListenerProxy and DatastoreContextIntrospector).
- catching Exception instead of catching more specific exception types that
are thrown from the try block.
- unconfirmed casts - verify via Preconditions check to avoid warning
- unnecessarily calling toString() on a String instance
- synchronizing an AtomicInteger instance (in ThreePhaseCommitCohortProxy) -
not an issue in this case but changed to synchronize a separate Object
in lieu of suppressing the warning.
- unsynchronized access to SimpleDateFormat, which isn't thread-safe (in ShardStats).
- potential null-pointer access of 'shard' in ShardStats - changed to pass
'shard' to the ctor in lieu of setter.
- calling String#getBytes w/o specifying encoding (in DataTreeModificationOutput).
- privileged access to create ClassLoader in ActorSystemProviderImpl although not
likely a SecurityManager would ever be present.
Change-Id: I0a87208f3f200fbe4f78e950c21419fbab154d94
Signed-off-by: Tom Pantelis <tpanteli@brocade.com>
<propertyExpansion>checkstyle.violationSeverity=error</propertyExpansion>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>findbugs-maven-plugin</artifactId>
+ <configuration>
+ <failOnError>true</failOnError>
+ </configuration>
+ </plugin>
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
import org.slf4j.LoggerFactory;
abstract class AbstractDOMBroker extends AbstractDOMTransactionFactory<DOMStore>
- implements DOMDataBroker, AutoCloseable {
+ implements DOMDataBroker {
private static final Logger LOG = LoggerFactory.getLogger(AbstractDOMBroker.class);
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
Futures.addCallback(commitFuture, futureCallback, MoreExecutors.directExecutor());
}
+ @SuppressFBWarnings(value = "BC_UNCONFIRMED_CAST_OF_RETURN_VALUE",
+ justification = "Pertains to the assignment of the 'clientException' var. FindBugs flags this as an "
+ + "uncomfirmed cast but the generic type in TransactionCommitFailedExceptionMapper is "
+ + "TransactionCommitFailedException and thus should be deemed as confirmed.")
private static void handleException(final AsyncNotifyingSettableFuture clientSubmitFuture,
final DOMDataWriteTransaction transaction,
final Collection<DOMStoreThreePhaseCommitCohort> cohorts,
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableBiMap.Builder;
import com.google.common.primitives.UnsignedLong;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
*
* @author Robert Varga
*/
+@SuppressFBWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
+ justification = "Pertains to the NULL_FUTURE field below. Null is allowed and is intended")
final class ModuleShardBackendResolver extends BackendInfoResolver<ShardBackendInfo> {
private static final CompletableFuture<ShardBackendInfo> NULL_FUTURE = CompletableFuture.completedFuture(null);
private static final Logger LOG = LoggerFactory.getLogger(ModuleShardBackendResolver.class);
final class CohortEntry {
private final ReadWriteShardDataTreeTransaction transaction;
- private final TransactionIdentifier transactionID;
+ private final TransactionIdentifier transactionId;
private final short clientVersion;
private RuntimeException lastBatchedModificationsException;
private CohortEntry(final ReadWriteShardDataTreeTransaction transaction, final short clientVersion) {
this.transaction = Preconditions.checkNotNull(transaction);
- this.transactionID = transaction.getId();
+ this.transactionId = transaction.getId();
this.clientVersion = clientVersion;
}
private CohortEntry(final ShardDataTreeCohort cohort, final short clientVersion) {
this.cohort = Preconditions.checkNotNull(cohort);
- this.transactionID = cohort.getIdentifier();
+ this.transactionId = cohort.getIdentifier();
this.transaction = null;
this.clientVersion = clientVersion;
}
return new CohortEntry(cohort, clientVersion);
}
- TransactionIdentifier getTransactionID() {
- return transactionID;
+ TransactionIdentifier getTransactionId() {
+ return transactionId;
}
short getClientVersion() {
if (cohortDecorator != null) {
// Call the hook for unit tests.
- cohort = cohortDecorator.decorate(transactionID, cohort);
+ cohort = cohortDecorator.decorate(transactionId, cohort);
}
}
@Override
public String toString() {
final StringBuilder builder = new StringBuilder();
- builder.append("CohortEntry [transactionID=").append(transactionID).append(", doImmediateCommit=")
+ builder.append("CohortEntry [transactionId=").append(transactionId).append(", doImmediateCommit=")
.append(doImmediateCommit).append("]");
return builder.toString();
}
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
private static class DataChangeListenerCreator implements Creator<DataChangeListener> {
private static final long serialVersionUID = 1L;
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
final AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener;
DataChangeListenerCreator(
import akka.actor.PoisonPill;
import akka.actor.Props;
import akka.japi.Creator;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistrationReply;
private static class DataChangeListenerRegistrationCreator
implements Creator<DataChangeListenerRegistrationActor> {
private static final long serialVersionUID = 1L;
+
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
NormalizedNode<?, ?>>> registration;
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
private static final class DataTreeChangeListenerCreator implements Creator<DataTreeChangeListenerActor> {
private static final long serialVersionUID = 1L;
+
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
private final DOMDataTreeChangeListener listener;
DataTreeChangeListenerCreator(final DOMDataTreeChangeListener listener) {
}
@VisibleForTesting
- ActorSelection getListenerRegistrationActor() {
+ synchronized ActorSelection getListenerRegistrationActor() {
return listenerRegistrationActor;
}
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
private static final class DataTreeChangeListenerRegistrationCreator
implements Creator<DataTreeChangeListenerRegistrationActor> {
private static final long serialVersionUID = 1L;
+
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
final ListenerRegistration<DOMDataTreeChangeListener> registration;
DataTreeChangeListenerRegistrationCreator(ListenerRegistration<DOMDataTreeChangeListener> registration) {
import java.util.List;
import java.util.Map;
import java.util.Set;
+import javax.annotation.concurrent.GuardedBy;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.WordUtils;
import org.opendaylight.controller.cluster.datastore.DatastoreContext.Builder;
propertyName, type));
}
+ @GuardedBy(value = "this")
private DatastoreContext context;
+ @GuardedBy(value = "this")
private Map<String, Object> currentProperties;
public DatastoreContextIntrospector(DatastoreContext context) {
this.context = context;
}
- public DatastoreContext getContext() {
+ public synchronized DatastoreContext getContext() {
return context;
}
for (DatastoreSnapshot snapshot: snapshots) {
datastoreSnapshots.put(snapshot.getType(), snapshot);
}
- } catch (Exception e) {
+ } catch (ClassNotFoundException | IOException e) {
LOG.error("Error reading clustered datastore restore file {}", restoreFile, e);
} finally {
if (!restoreFile.delete()) {
ShardManagerCreator creator = new ShardManagerCreator().cluster(cluster).configuration(configuration)
.datastoreContextFactory(datastoreContextFactory)
- .waitTillReadyCountdownLatch(waitTillReadyCountDownLatch)
+ .waitTillReadyCountDownLatch(waitTillReadyCountDownLatch)
.primaryShardInfoCache(primaryShardInfoCache)
.restoreFromSnapshot(restoreFromSnapshot);
@Override
public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
@Nullable Exception operationError) {
+ Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
if (operationError != null) {
return new LocalChainThreePhaseCommitCohort((SnapshotBackedWriteTransaction<TransactionIdentifier>)tx,
operationError);
@Override
public LocalThreePhaseCommitCohort onTransactionReady(@Nonnull DOMStoreWriteTransaction tx,
@Nullable Exception operationError) {
+ Preconditions.checkArgument(tx instanceof SnapshotBackedWriteTransaction);
if (operationError != null) {
return new LocalThreePhaseCommitCohort(actorContext, leader,
(SnapshotBackedWriteTransaction<TransactionIdentifier>)tx, operationError);
treeChangeListenerPublisher, dataChangeListenerPublisher, name);
}
- shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString(),
- datastoreContext.getDataStoreMXBeanType());
- shardMBean.setShard(this);
+ shardMBean = ShardMBeanFactory.getShardStatsMBean(name, datastoreContext.getDataStoreMXBeanType(), this);
if (isMetricsCaptureEnabled()) {
getContext().become(new MeteringBehavior(this));
setTransactionCommitTimeout();
// create a notifier actor for each cluster member
- roleChangeNotifier = createRoleChangeNotifier(name.toString());
+ roleChangeNotifier = createRoleChangeNotifier(name);
appendEntriesReplyTracker = new MessageTracker(AppendEntriesReply.class,
getRaftActorContext().getConfigParams().getIsolatedCheckIntervalInMillis());
updateSchemaContext((UpdateSchemaContext) message);
} else if (message instanceof PeerAddressResolved) {
PeerAddressResolved resolved = (PeerAddressResolved) message;
- setPeerAddress(resolved.getPeerId().toString(),
- resolved.getPeerAddress());
+ setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
} else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
store.checkForExpiredTransactions(transactionCommitTimeout);
commitCoordinator.checkForExpiredTransactions(transactionCommitTimeout, this);
private void handleCommitTransaction(final CommitTransaction commit) {
if (isLeader()) {
- commitCoordinator.handleCommit(commit.getTransactionID(), getSender(), this);
+ commitCoordinator.handleCommit(commit.getTransactionId(), getSender(), this);
} else {
ActorSelection leader = getLeader();
if (leader == null) {
messageRetrySupport.addMessageToRetry(commit, getSender(),
- "Could not commit transaction " + commit.getTransactionID());
+ "Could not commit transaction " + commit.getTransactionId());
} else {
LOG.debug("{}: Forwarding CommitTransaction to leader {}", persistenceId(), leader);
leader.forward(commit, getContext());
}
private void handleCanCommitTransaction(final CanCommitTransaction canCommit) {
- LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionID());
+ LOG.debug("{}: Can committing transaction {}", persistenceId(), canCommit.getTransactionId());
if (isLeader()) {
- commitCoordinator.handleCanCommit(canCommit.getTransactionID(), getSender(), this);
+ commitCoordinator.handleCanCommit(canCommit.getTransactionId(), getSender(), this);
} else {
ActorSelection leader = getLeader();
if (leader == null) {
messageRetrySupport.addMessageToRetry(canCommit, getSender(),
- "Could not canCommit transaction " + canCommit.getTransactionID());
+ "Could not canCommit transaction " + canCommit.getTransactionId());
} else {
LOG.debug("{}: Forwarding CanCommitTransaction to leader {}", persistenceId(), leader);
leader.forward(canCommit, getContext());
commitCoordinator.handleBatchedModifications(batched, sender, this);
} catch (Exception e) {
LOG.error("{}: Error handling BatchedModifications for Tx {}", persistenceId(),
- batched.getTransactionID(), e);
+ batched.getTransactionId(), e);
sender.tell(new akka.actor.Status.Failure(e), getSelf());
}
}
ActorSelection leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(batched, getSender(),
- "Could not commit transaction " + batched.getTransactionID());
+ "Could not commit transaction " + batched.getTransactionId());
} else {
// If this is not the first batch and leadership changed in between batched messages,
// we need to reconstruct previous BatchedModifications from the transaction
@SuppressWarnings("checkstyle:IllegalCatch")
private void handleReadyLocalTransaction(final ReadyLocalTransaction message) {
- LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionID());
+ LOG.debug("{}: handleReadyLocalTransaction for {}", persistenceId(), message.getTransactionId());
boolean isLeaderActive = isLeaderActive();
if (isLeader() && isLeaderActive) {
commitCoordinator.handleReadyLocalTransaction(message, getSender(), this);
} catch (Exception e) {
LOG.error("{}: Error handling ReadyLocalTransaction for Tx {}", persistenceId(),
- message.getTransactionID(), e);
+ message.getTransactionId(), e);
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
}
} else {
ActorSelection leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(message, getSender(),
- "Could not commit transaction " + message.getTransactionID());
+ "Could not commit transaction " + message.getTransactionId());
} else {
LOG.debug("{}: Forwarding ReadyLocalTransaction to leader {}", persistenceId(), leader);
message.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
}
private void handleForwardedReadyTransaction(final ForwardedReadyTransaction forwardedReady) {
- LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionID());
+ LOG.debug("{}: handleForwardedReadyTransaction for {}", persistenceId(), forwardedReady.getTransactionId());
boolean isLeaderActive = isLeaderActive();
if (isLeader() && isLeaderActive) {
ActorSelection leader = getLeader();
if (!isLeaderActive || leader == null) {
messageRetrySupport.addMessageToRetry(forwardedReady, getSender(),
- "Could not commit transaction " + forwardedReady.getTransactionID());
+ "Could not commit transaction " + forwardedReady.getTransactionId());
} else {
LOG.debug("{}: Forwarding ForwardedReadyTransaction to leader {}", persistenceId(), leader);
- ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionID(),
+ ReadyLocalTransaction readyLocal = new ReadyLocalTransaction(forwardedReady.getTransactionId(),
forwardedReady.getTransaction().getSnapshot(), forwardedReady.isDoImmediateCommit());
readyLocal.setRemoteVersion(getCurrentBehavior().getLeaderPayloadVersion());
leader.forward(readyLocal, getContext());
}
private void handleAbortTransaction(final AbortTransaction abort) {
- doAbortTransaction(abort.getTransactionID(), getSender());
+ doAbortTransaction(abort.getTransactionId(), getSender());
}
void doAbortTransaction(final TransactionIdentifier transactionID, final ActorRef sender) {
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
import javax.annotation.Nonnull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
void handleForwardedReadyTransaction(final ForwardedReadyTransaction ready, final ActorRef sender,
final Shard shard) {
log.debug("{}: Readying transaction {}, client version {}", name,
- ready.getTransactionID(), ready.getTxnClientVersion());
+ ready.getTransactionId(), ready.getTxnClientVersion());
final ShardDataTreeCohort cohort = ready.getTransaction().ready();
final CohortEntry cohortEntry = CohortEntry.createReady(cohort, ready.getTxnClientVersion());
- cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+ cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
if (ready.isDoImmediateCommit()) {
cohortEntry.setDoImmediateCommit(true);
* @param sender the sender of the message
*/
void handleBatchedModifications(final BatchedModifications batched, final ActorRef sender, final Shard shard) {
- CohortEntry cohortEntry = cohortCache.get(batched.getTransactionID());
+ CohortEntry cohortEntry = cohortCache.get(batched.getTransactionId());
if (cohortEntry == null) {
- cohortEntry = CohortEntry.createOpen(dataTree.newReadWriteTransaction(batched.getTransactionID()),
+ cohortEntry = CohortEntry.createOpen(dataTree.newReadWriteTransaction(batched.getTransactionId()),
batched.getVersion());
- cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+ cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
}
if (log.isDebugEnabled()) {
log.debug("{}: Applying {} batched modifications for Tx {}", name,
- batched.getModifications().size(), batched.getTransactionID());
+ batched.getModifications().size(), batched.getTransactionId());
}
cohortEntry.applyModifications(batched.getModifications());
if (batched.isReady()) {
if (cohortEntry.getLastBatchedModificationsException() != null) {
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
throw cohortEntry.getLastBatchedModificationsException();
}
if (cohortEntry.getTotalBatchedModificationsReceived() != batched.getTotalMessagesSent()) {
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
throw new IllegalStateException(String.format(
"The total number of batched messages received %d does not match the number sent %d",
cohortEntry.getTotalBatchedModificationsReceived(), batched.getTotalMessagesSent()));
if (log.isDebugEnabled()) {
log.debug("{}: Readying Tx {}, client version {}", name,
- batched.getTransactionID(), batched.getVersion());
+ batched.getTransactionId(), batched.getVersion());
}
cohortEntry.setDoImmediateCommit(batched.isDoCommitOnReady());
* @param shard the transaction's shard actor
*/
void handleReadyLocalTransaction(final ReadyLocalTransaction message, final ActorRef sender, final Shard shard) {
- final ShardDataTreeCohort cohort = dataTree.createReadyCohort(message.getTransactionID(),
+ final ShardDataTreeCohort cohort = dataTree.createReadyCohort(message.getTransactionId(),
message.getModification());
final CohortEntry cohortEntry = CohortEntry.createReady(cohort, DataStoreVersions.CURRENT_VERSION);
- cohortCache.put(cohortEntry.getTransactionID(), cohortEntry);
+ cohortCache.put(cohortEntry.getTransactionId(), cohortEntry);
cohortEntry.setDoImmediateCommit(message.isDoCommitOnReady());
- log.debug("{}: Applying local modifications for Tx {}", name, message.getTransactionID());
+ log.debug("{}: Applying local modifications for Tx {}", name, message.getTransactionId());
if (message.isDoCommitOnReady()) {
cohortEntry.setReplySender(sender);
Collection<BatchedModifications> createForwardedBatchedModifications(final BatchedModifications from,
final int maxModificationsPerBatch) {
- CohortEntry cohortEntry = cohortCache.remove(from.getTransactionID());
+ CohortEntry cohortEntry = cohortCache.remove(from.getTransactionId());
if (cohortEntry == null || cohortEntry.getTransaction() == null) {
return Collections.singletonList(from);
}
protected BatchedModifications getModifications() {
if (newModifications.isEmpty()
|| newModifications.getLast().getModifications().size() >= maxModificationsPerBatch) {
- newModifications.add(new BatchedModifications(from.getTransactionID(), from.getVersion()));
+ newModifications.add(new BatchedModifications(from.getTransactionId(), from.getVersion()));
}
return newModifications.getLast();
cohortEntry.canCommit(new FutureCallback<Void>() {
@Override
public void onSuccess(final Void result) {
- log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionID());
+ log.debug("{}: canCommit for {}: success", name, cohortEntry.getTransactionId());
if (cohortEntry.isDoImmediateCommit()) {
doCommit(cohortEntry);
@Override
public void onFailure(final Throwable failure) {
log.debug("{}: An exception occurred during canCommit for {}: {}", name,
- cohortEntry.getTransactionID(), failure);
+ cohortEntry.getTransactionId(), failure);
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
}
});
}
private void doCommit(final CohortEntry cohortEntry) {
- log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionID());
+ log.debug("{}: Committing transaction {}", name, cohortEntry.getTransactionId());
// We perform the preCommit phase here atomically with the commit phase. This is an
// optimization to eliminate the overhead of an extra preCommit message. We lose front-end
@Override
public void onFailure(final Throwable failure) {
log.error("{} An exception occurred while preCommitting transaction {}", name,
- cohortEntry.getTransactionID(), failure);
+ cohortEntry.getTransactionId(), failure);
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
cohortEntry.getReplySender().tell(new Failure(failure), cohortEntry.getShard().self());
}
});
}
private void finishCommit(@Nonnull final ActorRef sender, @Nonnull final CohortEntry cohortEntry) {
- log.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionID());
+ log.debug("{}: Finishing commit for transaction {}", persistenceId(), cohortEntry.getTransactionId());
cohortEntry.commit(new FutureCallback<UnsignedLong>() {
@Override
public void onSuccess(final UnsignedLong result) {
- final TransactionIdentifier txId = cohortEntry.getTransactionID();
+ final TransactionIdentifier txId = cohortEntry.getTransactionId();
log.debug("{}: Transaction {} committed as {}, sending response to {}", persistenceId(), txId, result,
sender);
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
sender.tell(CommitTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(),
cohortEntry.getShard().self());
}
@Override
public void onFailure(final Throwable failure) {
log.error("{}, An exception occurred while committing transaction {}", persistenceId(),
- cohortEntry.getTransactionID(), failure);
+ cohortEntry.getTransactionId(), failure);
- cohortCache.remove(cohortEntry.getTransactionID());
+ cohortCache.remove(cohortEntry.getTransactionId());
sender.tell(new Failure(failure), cohortEntry.getShard().self());
}
});
if (sender != null) {
sender.tell(AbortTransactionReply.instance(cohortEntry.getClientVersion()).toSerializable(), self);
}
- } catch (Exception e) {
+ } catch (InterruptedException | ExecutionException | TimeoutException e) {
log.error("{}: An exception happened during abort", name, e);
if (sender != null) {
}
// Allocate a new message
- final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionID(),
+ final BatchedModifications ret = new BatchedModifications(cohortEntry.getTransactionId(),
cohortEntry.getClientVersion());
newMessages.add(ret);
return ret;
switch (cohort.getState()) {
case CAN_COMMIT_COMPLETE:
case CAN_COMMIT_PENDING:
- messages.add(new CanCommitTransaction(cohortEntry.getTransactionID(),
+ messages.add(new CanCommitTransaction(cohortEntry.getTransactionId(),
cohortEntry.getClientVersion()));
break;
case PRE_COMMIT_COMPLETE:
case PRE_COMMIT_PENDING:
- messages.add(new CommitTransaction(cohortEntry.getTransactionID(),
+ messages.add(new CommitTransaction(cohortEntry.getTransactionId(),
cohortEntry.getClientVersion()));
break;
default:
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
import com.google.common.primitives.UnsignedLong;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.File;
import java.io.IOException;
import java.util.AbstractMap.SimpleEntry;
return cohort;
}
+ @SuppressFBWarnings(value = {"RV_RETURN_VALUE_IGNORED", "DB_DUPLICATE_SWITCH_CLAUSES"},
+ justification = "See inline comments below.")
void checkForExpiredTransactions(final long transactionCommitTimeoutMillis) {
final long timeout = TimeUnit.MILLISECONDS.toNanos(transactionCommitTimeoutMillis);
final long now = shard.ticker().read();
pendingTransactions.poll().cohort.failedCanCommit(new TimeoutException());
break;
case CAN_COMMIT_COMPLETE:
+ // The suppression of the FindBugs "DB_DUPLICATE_SWITCH_CLAUSES" warning pertains to this clause
+ // whose code is duplicated with PRE_COMMIT_COMPLETE. The clauses aren't combined in case the code
+ // in PRE_COMMIT_COMPLETE is changed.
pendingTransactions.poll().cohort.reportFailure(new TimeoutException());
break;
case PRE_COMMIT_PENDING:
case FAILED:
case READY:
default:
+ // The suppression of the FindBugs "RV_RETURN_VALUE_IGNORED" warning pertains to this line. In
+ // this case, we just want to drop the current entry that expired and thus ignore the return value.
+ // In fact we really shouldn't hit this case but we handle all enums for completeness.
pendingTransactions.poll();
}
}
}
+ @SuppressFBWarnings(value = "RV_RETURN_VALUE_IGNORED", justification = "See inline comment below.")
void startAbort(final SimpleShardDataTreeCohort cohort) {
final Iterator<CommitEntry> it = pendingTransactions.iterator();
if (!it.hasNext()) {
if (cohort.getState() != State.COMMIT_PENDING) {
LOG.debug("{}: aborted head of queue {} in state {}", logContext, cohort.getIdentifier(),
cohort.getIdentifier());
+
+ // The suppression of the FindBugs "RV_RETURN_VALUE_IGNORED" warning pertains to this line. In
+ // this case, we've already obtained the head of the queue above via the Iterator and we just want to
+ // remove it here.
pendingTransactions.poll();
processNextTransaction();
} else {
import akka.japi.Creator;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
public abstract class ShardTransaction extends AbstractUntypedActorWithMetering {
private final ActorRef shardActor;
private final ShardStats shardStats;
- private final TransactionIdentifier transactionID;
+ private final TransactionIdentifier transactionId;
- protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, TransactionIdentifier transactionID) {
+ protected ShardTransaction(ActorRef shardActor, ShardStats shardStats, TransactionIdentifier transactionId) {
super("shard-tx"); //actor name override used for metering. This does not change the "real" actor name
this.shardActor = shardActor;
this.shardStats = shardStats;
- this.transactionID = Preconditions.checkNotNull(transactionID);
+ this.transactionId = Preconditions.checkNotNull(transactionId);
}
public static Props props(TransactionType type, AbstractShardDataTreeTransaction<?> transaction,
return shardActor;
}
- protected final TransactionIdentifier getTransactionID() {
- return transactionID;
+ protected final TransactionIdentifier getTransactionId() {
+ return transactionId;
}
@Override
if (CloseTransaction.isSerializedType(message)) {
closeTransaction(true);
} else if (message instanceof ReceiveTimeout) {
- LOG.debug("Got ReceiveTimeout for inactivity - closing transaction {}", transactionID);
+ LOG.debug("Got ReceiveTimeout for inactivity - closing transaction {}", transactionId);
closeTransaction(false);
} else {
unknownMessage(message);
getSender().tell(new DataExistsReply(exists, message.getVersion()).toSerializable(), getSelf());
}
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "Some fields are not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
private static class ShardTransactionCreator implements Creator<ShardTransaction> {
private static final long serialVersionUID = 1L;
}
private void readyTransaction(boolean returnSerialized, boolean doImmediateCommit, short clientTxVersion) {
- TransactionIdentifier transactionID = getTransactionID();
+ TransactionIdentifier transactionID = getTransactionId();
LOG.debug("readyTransaction : {}", transactionID);
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
-import org.opendaylight.yangtools.concepts.Identifiable;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateTip;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
import org.slf4j.LoggerFactory;
import scala.concurrent.Future;
-final class SimpleShardDataTreeCohort extends ShardDataTreeCohort implements Identifiable<TransactionIdentifier> {
+final class SimpleShardDataTreeCohort extends ShardDataTreeCohort {
private static final Logger LOG = LoggerFactory.getLogger(SimpleShardDataTreeCohort.class);
private static final ListenableFuture<Void> VOID_FUTURE = Futures.immediateFuture(null);
private final DataTreeModification transaction;
}
final AtomicInteger completed = new AtomicInteger(cohorts.size());
+ final Object lock = new Object();
for (final CohortInfo info: cohorts) {
info.getActorFuture().onComplete(new OnComplete<ActorSelection>() {
@Override
public void onComplete(Throwable failure, ActorSelection actor) {
- synchronized (completed) {
+ synchronized (lock) {
boolean done = completed.decrementAndGet() == 0;
if (failure != null) {
LOG.debug("Tx {}: a cohort Future failed", transactionId, failure);
private static Map<String, String> createNamespaceToModuleName(Iterable<ModuleConfig> moduleConfigs) {
final ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
for (ModuleConfig moduleConfig : moduleConfigs) {
- if (moduleConfig.getNameSpace() != null) {
- builder.put(moduleConfig.getNameSpace(), moduleConfig.getName());
+ if (moduleConfig.getNamespace() != null) {
+ builder.put(moduleConfig.getNamespace(), moduleConfig.getName());
}
}
updateModuleConfigMap(moduleConfig);
namespaceToModuleName = ImmutableMap.<String, String>builder().putAll(namespaceToModuleName)
- .put(moduleConfig.getNameSpace(), moduleConfig.getName()).build();
+ .put(moduleConfig.getNamespace(), moduleConfig.getName()).build();
allShardNames = ImmutableSet.<String>builder().addAll(allShardNames).add(config.getShardName()).build();
}
*/
public class ModuleConfig {
private final String name;
- private final String nameSpace;
+ private final String namespace;
private final ShardStrategy shardStrategy;
private final Map<String, ShardConfig> shardConfigs;
- private ModuleConfig(String name, String nameSpace, ShardStrategy shardStrategy,
+ private ModuleConfig(String name, String namespace, ShardStrategy shardStrategy,
Map<String, ShardConfig> shardConfigs) {
this.name = name;
- this.nameSpace = nameSpace;
+ this.namespace = namespace;
this.shardStrategy = shardStrategy;
this.shardConfigs = shardConfigs;
}
}
@Nullable
- public String getNameSpace() {
- return nameSpace;
+ public String getNamespace() {
+ return namespace;
}
@Nullable
private Builder(ModuleConfig moduleConfig) {
this.name = moduleConfig.getName();
- this.nameSpace = moduleConfig.getNameSpace();
+ this.nameSpace = moduleConfig.getNamespace();
this.shardStrategy = moduleConfig.getShardStrategy();
for (ShardConfig shardConfig : moduleConfig.getShardConfigs()) {
shardConfigs.put(shardConfig.getName(), shardConfig);
if (failure != null) {
LOG.debug("Error sending message {} to {}", message, shardActor, failure);
} else {
- LOG.debug("{} message to {} succeeded", message, shardActor, failure);
+ LOG.debug("{} message to {} succeeded", message, shardActor);
}
}
}, context.getClientDispatcher());
import akka.actor.Props;
import akka.japi.Creator;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipChange;
import org.opendaylight.mdsal.eos.dom.api.DOMEntityOwnershipListener;
private static final class EntityOwnershipListenerCreator implements Creator<EntityOwnershipListenerActor> {
private static final long serialVersionUID = 1L;
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but we don't "
+ + "create remote instances of this actor and thus don't need it to be Serializable.")
private final DOMEntityOwnershipListener listener;
EntityOwnershipListenerCreator(DOMEntityOwnershipListener listener) {
void tryCommitModifications(final BatchedModifications modifications) {
if (isLeader()) {
LOG.debug("{}: Committing BatchedModifications {} locally", persistenceId(),
- modifications.getTransactionID());
+ modifications.getTransactionId());
// Note that it's possible the commit won't get consensus and will timeout and not be applied
// to the state. However we don't need to retry it in that case b/c it will be committed to
possiblyRemoveAllInitialCandidates(leader);
LOG.debug("{}: Sending BatchedModifications {} to leader {}", persistenceId(),
- modifications.getTransactionID(), leader);
+ modifications.getTransactionId(), leader);
Future<Object> future = Patterns.ask(leader, modifications, TimeUnit.SECONDS.toMillis(
getDatastoreContext().getShardTransactionCommitTimeoutInSeconds()));
}
if (shard.hasLeader()) {
- log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionID());
+ log.debug("Retrying commit for BatchedModifications {}", inflightCommit.getTransactionId());
shard.tryCommitModifications(inflightCommit);
} else {
return;
}
- log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionID(), cause);
+ log.debug("Inflight BatchedModifications {} commit failed", inflightCommit.getTransactionId(), cause);
if (!(cause instanceof NoShardLeaderException)) {
// If the failure is other than NoShardLeaderException the commit may have been partially
FiniteDuration duration = shard.getDatastoreContext().getShardRaftConfig().getElectionTimeOutInterval();
log.debug("Scheduling retry for BatchedModifications commit {} in {}",
- inflightCommit.getTransactionID(), duration);
+ inflightCommit.getTransactionId(), duration);
retryCommitSchedule = shard.getContext().system().scheduler().scheduleOnce(duration, shard.getSelf(),
COMMIT_RETRY_MESSAGE, shard.getContext().dispatcher(), ActorRef.noSender());
retryCommitSchedule.cancel();
}
- log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionID());
+ log.debug("BatchedModifications commit {} succeeded", inflightCommit.getTransactionId());
inflightCommit = null;
commitNextBatch(shard);
}
}
- log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionID(),
+ log.debug("Committing next BatchedModifications {}, size {}", inflightCommit.getTransactionId(),
inflightCommit.getModifications().size());
shard.tryCommitModifications(inflightCommit);
@Nullable
private BatchedModifications pruneModifications(BatchedModifications toPrune) {
- BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionID(),
+ BatchedModifications prunedModifications = new BatchedModifications(toPrune.getTransactionId(),
toPrune.getVersion());
prunedModifications.setDoCommitOnReady(toPrune.isDoCommitOnReady());
prunedModifications.setReady(toPrune.isReady());
*/
package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
+import javax.annotation.Nonnull;
+import org.opendaylight.controller.cluster.datastore.Shard;
/**
* Factory for creating ShardStats mbeans.
*/
public class ShardMBeanFactory {
- public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType) {
+ public static ShardStats getShardStatsMBean(final String shardName, final String mxBeanType,
+ @Nonnull final Shard shard) {
String finalMXBeanType = mxBeanType != null ? mxBeanType : "DistDataStore";
- ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType);
+ ShardStats shardStatsMBeanImpl = new ShardStats(shardName, finalMXBeanType, shard);
shardStatsMBeanImpl.registerMBean();
return shardStatsMBeanImpl;
}
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import javax.annotation.Nullable;
import org.opendaylight.controller.cluster.datastore.Shard;
import org.opendaylight.controller.cluster.raft.base.messages.InitiateCaptureSnapshot;
import org.opendaylight.controller.cluster.raft.client.messages.FollowerInfo;
* @author Basheeruddin syedbahm@cisco.com
*/
public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
- public static String JMX_CATEGORY_SHARD = "Shards";
+ public static final String JMX_CATEGORY_SHARD = "Shards";
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
private boolean followerInitialSyncStatus = false;
- private Shard shard;
+ private final Shard shard;
private String statRetrievalError;
private long lastLeadershipChangeTime;
- public ShardStats(final String shardName, final String mxBeanType) {
+ public ShardStats(final String shardName, final String mxBeanType, @Nullable final Shard shard) {
super(shardName, mxBeanType, JMX_CATEGORY_SHARD);
- }
-
- public void setShard(Shard shard) {
this.shard = shard;
}
@Override
public String getLastCommittedTransactionTime() {
- return DATE_FORMAT.format(new Date(lastCommittedTransactionTime));
+ synchronized (DATE_FORMAT) {
+ return DATE_FORMAT.format(new Date(lastCommittedTransactionTime));
+ }
}
@Override
@Override
public String getLastLeadershipChangeTime() {
- return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+ synchronized (DATE_FORMAT) {
+ return DATE_FORMAT.format(new Date(lastLeadershipChangeTime));
+ }
}
@Override
public int getPendingTxCommitQueueSize() {
- return shard.getPendingTxCommitQueueSize();
+ return shard != null ? shard.getPendingTxCommitQueueSize() : -1;
}
@Override
public int getTxCohortCacheSize() {
- return shard.getCohortCacheSize();
+ return shard != null ? shard.getCohortCacheSize() : -1;
}
@Override
public abstract class AbstractThreePhaseCommitMessage extends VersionedExternalizableMessage {
private static final long serialVersionUID = 1L;
- private TransactionIdentifier transactionID;
+ private TransactionIdentifier transactionId;
protected AbstractThreePhaseCommitMessage() {
}
- protected AbstractThreePhaseCommitMessage(final TransactionIdentifier transactionID, final short version) {
+ protected AbstractThreePhaseCommitMessage(final TransactionIdentifier transactionId, final short version) {
super(version);
- this.transactionID = Preconditions.checkNotNull(transactionID);
+ this.transactionId = Preconditions.checkNotNull(transactionId);
}
- public TransactionIdentifier getTransactionID() {
- return transactionID;
+ public TransactionIdentifier getTransactionId() {
+ return transactionId;
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
- transactionID = TransactionIdentifier.readFrom(in);
+ transactionId = TransactionIdentifier.readFrom(in);
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
super.writeExternal(out);
- transactionID.writeTo(out);
+ transactionId.writeTo(out);
}
@Override
public String toString() {
- return getClass().getSimpleName() + " [transactionID=" + transactionID + ", version=" + getVersion() + "]";
+ return getClass().getSimpleName() + " [transactionId=" + transactionId + ", version=" + getVersion() + "]";
}
}
private boolean ready;
private boolean doCommitOnReady;
private int totalMessagesSent;
- private TransactionIdentifier transactionID;
+ private TransactionIdentifier transactionId;
public BatchedModifications() {
}
- public BatchedModifications(TransactionIdentifier transactionID, short version) {
+ public BatchedModifications(TransactionIdentifier transactionId, short version) {
super(version);
- this.transactionID = Preconditions.checkNotNull(transactionID, "transactionID can't be null");
+ this.transactionId = Preconditions.checkNotNull(transactionId, "transactionId can't be null");
}
public boolean isReady() {
this.totalMessagesSent = totalMessagesSent;
}
- public TransactionIdentifier getTransactionID() {
- return transactionID;
+ public TransactionIdentifier getTransactionId() {
+ return transactionId;
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
super.readExternal(in);
- transactionID = TransactionIdentifier.readFrom(in);
+ transactionId = TransactionIdentifier.readFrom(in);
ready = in.readBoolean();
totalMessagesSent = in.readInt();
doCommitOnReady = in.readBoolean();
@Override
public void writeExternal(ObjectOutput out) throws IOException {
super.writeExternal(out);
- transactionID.writeTo(out);
+ transactionId.writeTo(out);
out.writeBoolean(ready);
out.writeInt(totalMessagesSent);
out.writeBoolean(doCommitOnReady);
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
- builder.append("BatchedModifications [transactionID=").append(transactionID).append(", ready=").append(ready)
+ builder.append("BatchedModifications [transactionId=").append(transactionId).append(", ready=").append(ready)
.append(", totalMessagesSent=").append(totalMessagesSent).append(", modifications size=")
.append(getModifications().size()).append("]");
return builder.toString();
package org.opendaylight.controller.cluster.datastore.messages;
import com.google.common.base.Preconditions;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Serializable;
import java.util.List;
import javax.annotation.Nonnull;
private final byte[] shardManagerSnapshot;
private final List<ShardSnapshot> shardSnapshots;
+ @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "Stores a reference to an externally mutable byte[] "
+ + "object but this is OK since this class is merely a DTO and does not process byte[] internally. "
+ + "Also it would be inefficient to create a return copy as the byte[] could be large.")
public DatastoreSnapshot(@Nonnull String type, @Nullable byte[] shardManagerSnapshot,
@Nonnull List<ShardSnapshot> shardSnapshots) {
this.type = Preconditions.checkNotNull(type);
return type;
}
+ @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
+ + "this is OK since this class is merely a DTO and does not process byte[] internally. "
+ + "Also it would be inefficient to create a return copy as the byte[] could be large.")
@Nullable
public byte[] getShardManagerSnapshot() {
return shardManagerSnapshot;
return name;
}
+ @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "Exposes a mutable object stored in a field but "
+ + "this is OK since this class is merely a DTO and does not process byte[] internally. "
+ + "Also it would be inefficient to create a return copy as the byte[] could be large.")
@Nonnull
public byte[] getSnapshot() {
return snapshot;
* @author Thomas Pantelis
*/
public class ForwardedReadyTransaction {
- private final TransactionIdentifier transactionID;
+ private final TransactionIdentifier transactionId;
private final ReadWriteShardDataTreeTransaction transaction;
private final boolean doImmediateCommit;
private final short txnClientVersion;
- public ForwardedReadyTransaction(TransactionIdentifier transactionID, short txnClientVersion,
+ public ForwardedReadyTransaction(TransactionIdentifier transactionId, short txnClientVersion,
ReadWriteShardDataTreeTransaction transaction, boolean doImmediateCommit) {
- this.transactionID = Preconditions.checkNotNull(transactionID);
+ this.transactionId = Preconditions.checkNotNull(transactionId);
this.transaction = Preconditions.checkNotNull(transaction);
this.txnClientVersion = txnClientVersion;
this.doImmediateCommit = doImmediateCommit;
}
- public TransactionIdentifier getTransactionID() {
- return transactionID;
+ public TransactionIdentifier getTransactionId() {
+ return transactionId;
}
public ReadWriteShardDataTreeTransaction getTransaction() {
@Override
public String toString() {
- return "ForwardedReadyTransaction [transactionID=" + transactionID + ", doImmediateCommit=" + doImmediateCommit
+ return "ForwardedReadyTransaction [transactionId=" + transactionId + ", doImmediateCommit=" + doImmediateCommit
+ ", txnClientVersion=" + txnClientVersion + "]";
}
}
*/
public final class ReadyLocalTransaction {
private final DataTreeModification modification;
- private final TransactionIdentifier transactionID;
+ private final TransactionIdentifier transactionId;
private final boolean doCommitOnReady;
// The version of the remote system used only when needing to convert to BatchedModifications.
private short remoteVersion = DataStoreVersions.CURRENT_VERSION;
- public ReadyLocalTransaction(final TransactionIdentifier transactionID, final DataTreeModification modification,
+ public ReadyLocalTransaction(final TransactionIdentifier transactionId, final DataTreeModification modification,
final boolean doCommitOnReady) {
- this.transactionID = Preconditions.checkNotNull(transactionID);
+ this.transactionId = Preconditions.checkNotNull(transactionId);
this.modification = Preconditions.checkNotNull(modification);
this.doCommitOnReady = doCommitOnReady;
}
- public TransactionIdentifier getTransactionID() {
- return transactionID;
+ public TransactionIdentifier getTransactionId() {
+ return transactionId;
}
public DataTreeModification getModification() {
public byte[] toBinary(final Object obj) {
Preconditions.checkArgument(obj instanceof ReadyLocalTransaction, "Unsupported object type %s", obj.getClass());
final ReadyLocalTransaction readyLocal = (ReadyLocalTransaction) obj;
- final BatchedModifications batched = new BatchedModifications(readyLocal.getTransactionID(),
+ final BatchedModifications batched = new BatchedModifications(readyLocal.getTransactionId(),
readyLocal.getRemoteVersion());
batched.setDoCommitOnReady(readyLocal.isDoCommitOnReady());
batched.setTotalMessagesSent(1);
private YangInstanceIdentifier path;
private boolean registerOnAllInstances;
+ public RegisterDataTreeChangeListener() {
+ // For Externalizable
+ }
+
public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath,
final boolean registerOnAllInstances) {
this.path = Preconditions.checkNotNull(path);
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
private static final long serialVersionUID = 1L;
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+ + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+ + "aren't serialized. FindBugs does not recognize this.")
private final List<FrontendClientMetadata> clients;
public FrontendShardDataTreeSnapshotMetadata(final Collection<FrontendClientMetadata> clients) {
import com.google.common.base.Verify;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMap.Builder;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
private static final long serialVersionUID = 1L;
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+ + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+ + "aren't serialized. FindBugs does not recognize this.")
private final Map<Class<? extends ShardDataTreeSnapshotMetadata<?>>, ShardDataTreeSnapshotMetadata<?>> metadata;
+
+ @SuppressFBWarnings(value = "SE_BAD_FIELD", justification = "This field is not Serializable but this class "
+ + "implements writeReplace to delegate serialization to a Proxy class and thus instances of this class "
+ + "aren't serialized. FindBugs does not recognize this.")
private final NormalizedNode<?, ?> rootNode;
public MetadataShardDataTreeSnapshot(final NormalizedNode<?, ?> rootNode) {
private ClusterWrapper cluster;
private Configuration configuration;
private DatastoreContextFactory datastoreContextFactory;
- private CountDownLatch waitTillReadyCountdownLatch;
+ private CountDownLatch waitTillReadyCountDownLatch;
private PrimaryShardInfoFutureCache primaryShardInfoCache;
private DatastoreSnapshot restoreFromSnapshot;
private volatile boolean sealed;
return self();
}
- CountDownLatch getWaitTillReadyCountdownLatch() {
- return waitTillReadyCountdownLatch;
+ CountDownLatch getWaitTillReadyCountDownLatch() {
+ return waitTillReadyCountDownLatch;
}
- public T waitTillReadyCountdownLatch(CountDownLatch newWaitTillReadyCountdownLatch) {
+ public T waitTillReadyCountDownLatch(CountDownLatch newWaitTillReadyCountDownLatch) {
checkSealed();
- this.waitTillReadyCountdownLatch = newWaitTillReadyCountdownLatch;
+ this.waitTillReadyCountDownLatch = newWaitTillReadyCountDownLatch;
return self();
}
Preconditions.checkNotNull(cluster, "cluster should not be null");
Preconditions.checkNotNull(configuration, "configuration should not be null");
Preconditions.checkNotNull(datastoreContextFactory, "datastoreContextFactory should not be null");
- Preconditions.checkNotNull(waitTillReadyCountdownLatch, "waitTillReadyCountdownLatch should not be null");
+ Preconditions.checkNotNull(waitTillReadyCountDownLatch, "waitTillReadyCountDownLatch should not be null");
Preconditions.checkNotNull(primaryShardInfoCache, "primaryShardInfoCache should not be null");
}
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.ByteArrayInputStream;
+import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.ArrayList;
import java.util.Collection;
this.type = datastoreContextFactory.getBaseDatastoreContext().getDataStoreName();
this.shardDispatcherPath =
new Dispatchers(context().system().dispatchers()).getDispatcherPath(Dispatchers.DispatcherType.Shard);
- this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountdownLatch();
+ this.waitTillReadyCountdownLatch = builder.getWaitTillReadyCountDownLatch();
this.primaryShardInfoCache = builder.getPrimaryShardInfoCache();
this.restoreFromSnapshot = builder.getRestoreFromSnapshot();
LOG.debug("{}: Deserialized restored ShardManagerSnapshot: {}", persistenceId(), snapshot);
applyShardManagerSnapshot(snapshot);
- } catch (Exception e) {
+ } catch (ClassNotFoundException | IOException e) {
LOG.error("{}: Error deserializing restored ShardManagerSnapshot", persistenceId(), e);
}
}
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
+import java.nio.charset.StandardCharsets;
import javax.xml.stream.XMLStreamException;
import org.opendaylight.controller.cluster.datastore.util.AbstractDataTreeModificationCursor;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
public static void toFile(File file, DataTreeModification modification) {
try (FileOutputStream outStream = new FileOutputStream(file)) {
modification.applyToCursor(new DataTreeModificationOutputCursor(new DataOutputStream(outStream)));
- } catch (Exception e) {
+ } catch (IOException | RuntimeException e) {
LOG.error("Error writing DataTreeModification to file {}", file, e);
}
}
@Override
public void delete(PathArgument child) {
try {
- output.write("\nDELETE -> ".getBytes());
- output.write(current().node(child).toString().getBytes());
+ output.write("\nDELETE -> ".getBytes(StandardCharsets.UTF_8));
+ output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
output.writeByte('\n');
} catch (IOException e) {
Throwables.propagate(e);
private void outputPathAndNode(String name, PathArgument child, NormalizedNode<?, ?> data) {
try {
output.writeByte('\n');
- output.write(name.getBytes());
- output.write(" -> ".getBytes());
- output.write(current().node(child).toString().getBytes());
- output.write(": \n".getBytes());
+ output.write(name.getBytes(StandardCharsets.UTF_8));
+ output.write(" -> ".getBytes(StandardCharsets.UTF_8));
+ output.write(current().node(child).toString().getBytes(StandardCharsets.UTF_8));
+ output.write(": \n".getBytes(StandardCharsets.UTF_8));
NormalizedNodeXMLOutput.toStream(output, data);
output.writeByte('\n');
} catch (IOException | XMLStreamException e) {
import akka.osgi.BundleDelegatingClassLoader;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
import java.util.concurrent.TimeUnit;
import org.opendaylight.controller.cluster.ActorSystemProvider;
import org.opendaylight.controller.cluster.ActorSystemProviderListener;
final Bundle bundle = bundleContext.getBundle();
- final BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundle, Thread.currentThread()
- .getContextClassLoader());
+ final BundleDelegatingClassLoader classLoader = AccessController.doPrivileged(
+ (PrivilegedAction<BundleDelegatingClassLoader>) () ->
+ new BundleDelegatingClassLoader(bundle, Thread.currentThread().getContextClassLoader()));
final AkkaConfigurationReader configurationReader = new FileAkkaConfigurationReader();
final Config akkaConfig = ConfigFactory.load(configurationReader.read()).getConfig(CONFIGURATION_NAME);
private final DatastoreContext datastoreContext = DatastoreContext.newBuilder().build();
- private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore");
+ private final ShardStats shardStats = new ShardStats(SHARD_IDENTIFIER.toString(), "DataStore", null);
private ActorRef createShard() {
ActorRef shard = getSystem().actorOf(Shard.builder().id(SHARD_IDENTIFIER).datastoreContext(datastoreContext)
try {
assertNotNull("Unexpected " + name, expType);
assertEquals(name + " type", expType, rawMessage.getClass());
- assertEquals(name + " transactionId", builder.transactionId, actualMessage.getTransactionID());
+ assertEquals(name + " transactionId", builder.transactionId, actualMessage.getTransactionId());
if (reply instanceof Throwable) {
getSender().tell(new akka.actor.Status.Failure((Throwable)reply), self());
@Before
public void setUp() throws Exception {
- shardStats = new ShardStats("shard-1", "DataStore");
+ shardStats = new ShardStats("shard-1", "DataStore", null);
shardStats.registerMBean();
mbeanServer = ManagementFactory.getPlatformMBeanServer();
String objectName = AbstractMXBean.BASE_JMX_PREFIX + "type=" + shardStats.getMBeanType() + ",Category="
AbortTransaction actual = AbortTransaction.fromSerializable(
SerializationUtils.clone((Serializable) serialized));
- assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+ assertEquals("getTransactionId", expected.getTransactionId(), actual.getTransactionId());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
}
(Serializable) batched.toSerializable());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
- assertEquals("getTransactionID", tx1, clone.getTransactionID());
+ assertEquals("getTransactionId", tx1, clone.getTransactionId());
assertEquals("isReady", true, clone.isReady());
assertEquals("getTotalMessagesSent", 5, clone.getTotalMessagesSent());
clone = (BatchedModifications) SerializationUtils.clone((Serializable) batched.toSerializable());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, clone.getVersion());
- assertEquals("getTransactionID", tx2, clone.getTransactionID());
+ assertEquals("getTransactionId", tx2, clone.getTransactionId());
assertEquals("isReady", false, clone.isReady());
assertEquals("getModifications size", 0, clone.getModifications().size());
CanCommitTransaction actual = CanCommitTransaction.fromSerializable(
SerializationUtils.clone((Serializable) serialized));
- assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+ assertEquals("getTransactionId", expected.getTransactionId(), actual.getTransactionId());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
}
CommitTransaction actual = CommitTransaction.fromSerializable(
SerializationUtils.clone((Serializable) serialized));
- assertEquals("getTransactionID", expected.getTransactionID(), actual.getTransactionID());
+ assertEquals("getTransactionId", expected.getTransactionId(), actual.getTransactionId());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, actual.getVersion());
}
assertNotNull("fromBinary returned null", deserialized);
assertEquals("fromBinary return type", BatchedModifications.class, deserialized.getClass());
BatchedModifications batched = (BatchedModifications)deserialized;
- assertEquals("getTransactionID", txId, batched.getTransactionID());
+ assertEquals("getTransactionId", txId, batched.getTransactionId());
assertEquals("getVersion", DataStoreVersions.CURRENT_VERSION, batched.getVersion());
List<Modification> batchedMods = batched.getModifications();
AbstractGenericCreator(Class<C> shardManagerClass) {
this.shardManagerClass = shardManagerClass;
- cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).waitTillReadyCountdownLatch(ready)
+ cluster(new MockClusterWrapper()).configuration(new MockConfiguration()).waitTillReadyCountDownLatch(ready)
.primaryShardInfoCache(new PrimaryShardInfoFutureCache());
}