}
if (!bundlesToDestroy.isEmpty()) {
- Collections.sort(bundlesToDestroy, (b1, b2) -> (int) (b2.getLastModified() - b1.getLastModified()));
+ bundlesToDestroy.sort((b1, b2) -> Long.compare(b2.getLastModified(), b1.getLastModified()));
LOG.debug("Selected bundles {} for destroy (no services in use)", bundlesToDestroy);
} else {
}
final Set<DOMRpcIdentifier> rpcs = ImmutableSet.copyOf(Collections2.transform(paths, DOMRpcIdentifier::create));
- reg = domRpcProvider.registerRpcImplementation((rpc, input) -> {
- return FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException(
- "Action %s has no instance matching %s", rpc, input));
- }, rpcs);
+ reg = domRpcProvider.registerRpcImplementation(
+ (rpc, input) -> FluentFutures.immediateFailedFluentFuture(new DOMRpcImplementationNotAvailableException(
+ "Action %s has no instance matching %s", rpc, input)), rpcs);
LOG.debug("Registered provider for {}", interfaceName);
}
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Arrays;
import java.util.Collections;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
private final Object anchor;
private final String createDescription;
- private final Set<CloseTracked<T>> tracked = new ConcurrentSkipListSet<>(
- (o1, o2) -> Integer.compare(System.identityHashCode(o1), System.identityHashCode(o2)));
+ private final Set<CloseTracked<T>> tracked =
+ new ConcurrentSkipListSet<>(Comparator.comparingInt(System::identityHashCode));
private final boolean isDebugContextEnabled;
}
Set<CloseTrackedRegistryReportEntry<T>> report = new HashSet<>();
- map.forEach((stackTraceElements, number) -> {
- copyOfTracked.stream().filter(closeTracked -> {
- StackTraceElement[] closeTrackedStackTraceArray = closeTracked.getAllocationContextStackTrace();
- List<StackTraceElement> closeTrackedStackTraceElements =
- closeTrackedStackTraceArray != null ? asList(closeTrackedStackTraceArray) : emptyList();
- return closeTrackedStackTraceElements.equals(stackTraceElements);
- }).findAny().ifPresent(exampleCloseTracked -> {
- report.add(new CloseTrackedRegistryReportEntry<>(exampleCloseTracked, number, stackTraceElements));
- });
- });
+ map.forEach((stackTraceElements, number) -> copyOfTracked.stream().filter(closeTracked -> {
+ StackTraceElement[] closeTrackedStackTraceArray = closeTracked.getAllocationContextStackTrace();
+ List<StackTraceElement> closeTrackedStackTraceElements =
+ closeTrackedStackTraceArray != null ? asList(closeTrackedStackTraceArray) : emptyList();
+ return closeTrackedStackTraceElements.equals(stackTraceElements);
+ }).findAny().ifPresent(exampleCloseTracked -> report.add(
+ new CloseTrackedRegistryReportEntry<>(exampleCloseTracked, number, stackTraceElements))));
return report;
}
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
Map<Long, Object> journal = JOURNALS.get(persistenceId);
if (journal != null) {
synchronized (journal) {
- Iterator<Long> iter = journal.keySet().iterator();
- while (iter.hasNext()) {
- Long num = iter.next();
- if (num <= toSequenceNr) {
- iter.remove();
- }
- }
+ journal.keySet().removeIf(num -> num <= toSequenceNr);
}
}
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
final Root root = new RootBuilder().setListInRoot(listInRoots).build();
final TestListener<Root> listener = createListener(LogicalDatastoreType.CONFIGURATION, ROOT_PATH,
- match(ModificationType.WRITE, ROOT_PATH, dataBefore -> dataBefore == null,
+ match(ModificationType.WRITE, ROOT_PATH, Objects::isNull,
(Function<Root, Boolean>) dataAfter -> checkData(root, dataAfter)));
final ReadWriteTransaction readWriteTransaction = getDataBroker().newReadWriteTransaction();
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import com.google.common.io.ByteSource;
import java.io.IOException;
stateCache = CacheBuilder.newBuilder()
.expireAfterAccess(builder.expireStateAfterInactivityDuration, builder.expireStateAfterInactivityUnit)
- .removalListener((RemovalListener<Identifier, AssembledMessageState>) notification ->
- stateRemoved(notification)).build();
+ .removalListener(this::stateRemoved).build();
}
/**
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.Serializable;
-import java.util.Iterator;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
this.logContext = builder.logContext + "_slicer-id-" + id;
CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder =
- CacheBuilder.newBuilder().removalListener(notification -> stateRemoved(notification));
+ CacheBuilder.newBuilder().removalListener(this::stateRemoved);
if (builder.expireStateAfterInactivityDuration > 0) {
cacheBuilder = cacheBuilder.expireAfterAccess(builder.expireStateAfterInactivityDuration,
builder.expireStateAfterInactivityUnit);
* @param filter filters by Identifier
*/
public void cancelSlicing(@Nonnull final Predicate<Identifier> filter) {
- final Iterator<MessageSliceIdentifier> iter = stateCache.asMap().keySet().iterator();
- while (iter.hasNext()) {
- if (filter.test(iter.next().getClientIdentifier())) {
- iter.remove();
- }
- }
+ stateCache.asMap().keySet().removeIf(
+ messageSliceIdentifier -> filter.test(messageSliceIdentifier.getClientIdentifier()));
}
private static MessageSlice getNextSliceMessage(final SlicedMessageState<ActorRef> state) throws IOException {
LOG.debug("Shard {} resolved to {}, attempting to connect", shardName, info);
FutureConverters.toJava(ExplicitAsk.ask(info.getPrimaryShardActor(), connectFunction, CONNECT_TIMEOUT))
- .whenComplete((response, failure) -> {
- onConnectResponse(shardName, cookie, future, response, failure);
- });
+ .whenComplete((response, failure) -> onConnectResponse(shardName, cookie, future, response, failure));
}
private void onConnectResponse(final String shardName, final long cookie,
TransactionIdentifier txId = message.getTxId();
ListenableFuture<S> future = process(handledMessageType.cast(message));
Executor callbackExecutor = future.isDone() ? MoreExecutors.directExecutor()
- : runnable -> executeInSelf(runnable);
+ : DataTreeCohortActor.this::executeInSelf;
Futures.addCallback(future, new FutureCallback<S>() {
@Override
public void onSuccess(S nextStep) {
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// Sort the property keys by putting the names prefixed with the data store type last. This
// is done so data store specific settings are applied after global settings.
final ArrayList<String> keys = new ArrayList<>(inKeys);
- Collections.sort(keys, (key1, key2) -> key1.startsWith(dataStoreTypePrefix) ? 1 :
- key2.startsWith(dataStoreTypePrefix) ? -1 : key1.compareTo(key2));
+ keys.sort((key1, key2) -> {
+ boolean key1Prefixed = key1.startsWith(dataStoreTypePrefix);
+ boolean key2Prefixed = key2.startsWith(dataStoreTypePrefix);
+ return key1Prefixed == key2Prefixed ? key1.compareTo(key2)
+ : Boolean.compare(key1Prefixed, key2Prefixed);
+ });
return keys;
}
responseMessageSlicer.slice(SliceOptions.builder().identifier(success.getTarget())
.message(envelope.newSuccessEnvelope(success, executionTimeNanos))
.sendTo(envelope.getMessage().getReplyTo()).replyTo(self())
- .onFailureCallback(t -> {
- LOG.warn("Error slicing response {}", success, t);
- }).build()));
+ .onFailureCallback(t -> LOG.warn("Error slicing response {}", success, t)).build()));
} else {
envelope.sendSuccess(success, executionTimeNanos);
}
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import javax.annotation.Nonnull;
}
void checkForExpiredTransactions(final long timeout, final Shard shard) {
- Iterator<CohortEntry> iter = cohortCache.values().iterator();
- while (iter.hasNext()) {
- CohortEntry cohortEntry = iter.next();
- if (cohortEntry.isFailed()) {
- iter.remove();
- }
- }
+ cohortCache.values().removeIf(CohortEntry::isFailed);
}
void abortPendingTransactions(final String reason, final Shard shard) {
ShardDataTreeCohort createReadyCohort(final TransactionIdentifier txId, final DataTreeModification mod,
final java.util.Optional<SortedSet<String>> participatingShardNames) {
SimpleShardDataTreeCohort cohort = new SimpleShardDataTreeCohort(this, mod, txId,
- cohortRegistry.createCohort(schemaContext, txId, runnable -> shard.executeInSelf(runnable),
+ cohortRegistry.createCohort(schemaContext, txId, shard::executeInSelf,
COMMIT_STEP_TIMEOUT), participatingShardNames);
pendingTransactions.add(new CommitEntry(cohort, readTime()));
return cohort;
}
private Collection<String> addToCurrentCandidates(YangInstanceIdentifier entityId, String newCandidate) {
- Collection<String> candidates = currentCandidates.get(entityId);
- if (candidates == null) {
- candidates = new LinkedHashSet<>();
- currentCandidates.put(entityId, candidates);
- }
+ Collection<String> candidates = currentCandidates.computeIfAbsent(entityId, k -> new LinkedHashSet<>());
candidates.add(newCandidate);
return candidates;
}
// Prune the subsequent pending modifications.
- Iterator<Modification> iter = pendingModifications.iterator();
- while (iter.hasNext()) {
- Modification mod = iter.next();
- if (!canForwardModificationToNewLeader(mod)) {
- iter.remove();
- }
- }
+ pendingModifications.removeIf(mod -> !canForwardModificationToNewLeader(mod));
}
}
map.put(candidateName, count);
statistics.put(entityType, map);
} else {
- Long candidateOwnedEntities = map.get(candidateName);
- if (candidateOwnedEntities == null) {
- map.put(candidateName, count);
- } else {
- map.put(candidateName, candidateOwnedEntities + count);
- }
+ map.merge(candidateName, count, (ownedEntities, addedEntities) -> ownedEntities + addedEntities);
}
}
}
import com.google.common.util.concurrent.SettableFuture;
import com.google.common.util.concurrent.Uninterruptibles;
import java.util.AbstractMap.SimpleEntry;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Comparator;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
void resolveShardAdditions(final Set<DOMDataTreeIdentifier> additions) {
LOG.debug("{}: Resolving additions : {}", memberName, additions);
- final ArrayList<DOMDataTreeIdentifier> list = new ArrayList<>(additions);
// we need to register the shards from top to bottom, so we need to atleast make sure the ordering reflects that
- Collections.sort(list, (o1, o2) -> {
- if (o1.getRootIdentifier().getPathArguments().size() < o2.getRootIdentifier().getPathArguments().size()) {
- return -1;
- } else if (o1.getRootIdentifier().getPathArguments().size()
- == o2.getRootIdentifier().getPathArguments().size()) {
- return 0;
- } else {
- return 1;
- }
- });
- list.forEach(this::createShardFrontend);
+ additions
+ .stream()
+ .sorted(Comparator.comparingInt(o -> o.getRootIdentifier().getPathArguments().size()))
+ .forEachOrdered(this::createShardFrontend);
}
void resolveShardRemovals(final Set<DOMDataTreeIdentifier> removals) {
leaderLastApplied.set(rs.getLastApplied());
});
- verifyRaftState(peer2, rs -> {
- assertEquals("LastApplied", leaderLastApplied.get(), rs.getLastIndex());
- });
+ verifyRaftState(peer2, rs -> assertEquals("LastApplied", leaderLastApplied.get(), rs.getLastIndex()));
// Kill the local leader and elect peer2 the leader. This should cause a new owner to be selected for
// the entities (1 and 3) previously owned by the local leader member.
Function<ShardSnapshot, String> shardNameTransformer = ShardSnapshot::getName;
assertEquals("Shard names", Sets.newHashSet("shard1", "shard2"), Sets.newHashSet(
- Lists.transform(datastoreSnapshot.getShardSnapshots(), shardNameTransformer)));
+ datastoreSnapshot.getShardSnapshots().stream().map(shardNameTransformer).collect(Collectors.toSet())));
// Add a new replica
});
measure("Txs:1 Submit, Finish", (Callable<Void>) () -> {
- measure("Txs:1 Submit", (Callable<ListenableFuture<?>>) () -> writeTx.submit()).get();
+ measure("Txs:1 Submit", (Callable<ListenableFuture<?>>) writeTx::submit).get();
return null;
});
}
import org.opendaylight.controller.md.sal.dom.api.DOMTransactionChain;
import org.opendaylight.mdsal.common.api.CommitInfo;
import org.opendaylight.mdsal.common.api.MappingCheckedFuture;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohort;
-import org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistration;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeReadWriteTransaction;
import org.opendaylight.mdsal.dom.api.DOMDataTreeWriteTransaction;
final org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener delegateListener;
if (listener instanceof ClusteredDOMDataTreeChangeListener) {
delegateListener = (org.opendaylight.mdsal.dom.api.ClusteredDOMDataTreeChangeListener)
- changes -> listener.onDataTreeChanged(changes);
+ listener::onDataTreeChanged;
} else {
- delegateListener = changes -> listener.onDataTreeChanged(changes);
+ delegateListener = listener::onDataTreeChanged;
}
final ListenerRegistration<org.opendaylight.mdsal.dom.api.DOMDataTreeChangeListener> reg =
(org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry) delegateExtensions.get(
org.opendaylight.mdsal.dom.api.DOMDataTreeCommitCohortRegistry.class);
if (delegateCohortRegistry != null) {
- extBuilder.put(DOMDataTreeCommitCohortRegistry.class, new DOMDataTreeCommitCohortRegistry() {
- @Override
- public <T extends DOMDataTreeCommitCohort> DOMDataTreeCommitCohortRegistration<T> registerCommitCohort(
- org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier path, T cohort) {
- return delegateCohortRegistry.registerCommitCohort(path, cohort);
- }
- });
+ extBuilder.put(DOMDataTreeCommitCohortRegistry.class, delegateCohortRegistry::registerCommitCohort);
}
extensions = extBuilder.build();