import akka.actor.ActorSelection;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
-import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;
import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FluentFuture;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Optional;
import java.util.Set;
+import java.util.SortedSet;
import java.util.TreeMap;
+import java.util.TreeSet;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.messages.AbstractRead;
import org.opendaylight.controller.cluster.datastore.messages.DataExists;
import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
import org.opendaylight.controller.cluster.datastore.utils.NormalizedNodeAggregator;
-import org.opendaylight.mdsal.common.api.MappingCheckedFuture;
-import org.opendaylight.mdsal.common.api.ReadFailedException;
import org.opendaylight.mdsal.dom.spi.store.AbstractDOMStoreTransaction;
import org.opendaylight.mdsal.dom.spi.store.DOMStoreReadWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
private static final Logger LOG = LoggerFactory.getLogger(TransactionProxy.class);
- // Global lock used for transactions spanning multiple shards - synchronizes sending of the ready messages
- // for atomicity to avoid potential deadlock with concurrent transactions spanning the same shards as outlined
- // in the following scenario:
- //
- // - Tx1 sends ready message to shard A
- // - Tx2 sends ready message to shard A
- // - Tx2 sends ready message to shard B
- // - Tx1 sends ready message to shard B
- //
- // This scenario results in deadlock: after Tx1 canCommits to shard A, it can't proceed with shard B until Tx2
- // completes as Tx2 was readied first on shard B. However Tx2 cannot make progress because it's waiting to canCommit
- // on shard A which is blocked by Tx1.
- //
- // The global lock avoids this as it forces the ready messages to be sent in a predictable order:
- //
- // - Tx1 sends ready message to shard A
- // - Tx1 sends ready message to shard B
- // - Tx2 sends ready message to shard A
- // - Tx2 sends ready message to shard B
- //
- private static final Object GLOBAL_TX_READY_LOCK = new Object();
-
// Shard name -> per-shard transaction context wrapper. A TreeMap is used so that
// iteration (and therefore the order in which ready messages are sent to shards)
// is deterministic by shard name.
private final Map<String, TransactionContextWrapper> txContextWrappers = new TreeMap<>();
// Factory that created this transaction; also notified when the transaction is readied.
private final AbstractTransactionContextFactory<?> txContextFactory;
// READ_ONLY / WRITE_ONLY / READ_WRITE — guards the read paths below.
private final TransactionType type;
}
@Override
// Existence check for the given path, routed to the single shard that owns it.
// This hunk migrates the return type from Guava's CheckedFuture<Boolean, ReadFailedException>
// to FluentFuture<Boolean>; the checked-exception mapping is dropped at the API boundary.
- public CheckedFuture<Boolean, ReadFailedException> exists(final YangInstanceIdentifier path) {
+ public FluentFuture<Boolean> exists(final YangInstanceIdentifier path) {
return executeRead(shardNameFromIdentifier(path), new DataExists(path, DataStoreVersions.CURRENT_VERSION));
}
// Dispatches a read-type command (ReadData / DataExists) to the named shard.
// Signature migrated from CheckedFuture<T, ReadFailedException> to FluentFuture<T>.
// NOTE(review): the middle of this method is elided by the diff hunk — the `});`
// below closes a callback whose opening is not visible here.
- private <T> CheckedFuture<T, ReadFailedException> executeRead(final String shardName,
- final AbstractRead<T> readCmd) {
+ private <T> FluentFuture<T> executeRead(final String shardName, final AbstractRead<T> readCmd) {
Preconditions.checkState(type != TransactionType.WRITE_ONLY,
"Reads from write-only transactions are not allowed");
}
});
// MappingCheckedFuture (which translated failures into ReadFailedException) is no
// longer needed; the underlying ListenableFuture is wrapped directly.
- return MappingCheckedFuture.create(proxyFuture, ReadFailedException.MAPPER);
+ return FluentFuture.from(proxyFuture);
}
@Override
// Reads the data at the given path. An empty (root) path fans out to every shard
// and aggregates; any other path is routed to its owning shard. Return type
// migrated to FluentFuture, and Optional here is now java.util.Optional (see the
// import swap earlier in this patch).
- public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(final YangInstanceIdentifier path) {
+ public FluentFuture<Optional<NormalizedNode<?, ?>>> read(final YangInstanceIdentifier path) {
Preconditions.checkState(type != TransactionType.WRITE_ONLY,
"Reads from write-only transactions are not allowed");
Preconditions.checkNotNull(path, "path should not be null");
return path.isEmpty() ? readAllData() : singleShardRead(shardNameFromIdentifier(path), path);
}
// Reads the subtree at `path` from a single named shard. Return type migrated
// from CheckedFuture to FluentFuture as part of this patch.
- private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> singleShardRead(
+ private FluentFuture<Optional<NormalizedNode<?, ?>>> singleShardRead(
final String shardName, final YangInstanceIdentifier path) {
return executeRead(shardName, new ReadData(path, DataStoreVersions.CURRENT_VERSION));
}
// Root read: issues a read of the empty path against every configured shard and
// aggregates the results (presumably via NormalizedNodeAggregator, per the import
// above — the aggregation callback itself is elided by this diff hunk).
- private CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readAllData() {
+ private FluentFuture<Optional<NormalizedNode<?, ?>>> readAllData() {
final Set<String> allShardNames = txContextFactory.getActorContext().getConfiguration().getAllShardNames();
// Per-shard futures; the element type changes with the CheckedFuture -> FluentFuture migration.
- final Collection<CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>> futures =
- new ArrayList<>(allShardNames.size());
+ final Collection<FluentFuture<Optional<NormalizedNode<?, ?>>>> futures = new ArrayList<>(allShardNames.size());
for (String shardName : allShardNames) {
futures.add(singleShardRead(shardName, YangInstanceIdentifier.EMPTY));
}
}, MoreExecutors.directExecutor());
// As in executeRead: wrap directly instead of mapping to ReadFailedException.
- return MappingCheckedFuture.create(aggregateFuture, ReadFailedException.MAPPER);
+ return FluentFuture.from(aggregateFuture);
}
@Override
// NOTE(review): hunk elision splices fragments of (at least) two methods here —
// the switch that picks a single- vs multi-shard commit cohort, and a
// direct-commit path. Read with the full file at hand.
ret = createSingleCommitCohort(e.getKey(), e.getValue());
break;
default:
// createMultiCommitCohort no longer takes the entry set; it now reads
// txContextWrappers directly (see its new signature below).
- ret = createMultiCommitCohort(txContextWrappers.entrySet());
+ ret = createMultiCommitCohort();
}
txContextFactory.onTransactionReady(getIdentifier(), ret.getCohortFutures());
return transactionContext.directCommit(havePermit);
}
// Builds the three-phase-commit cohort for a transaction spanning multiple shards.
// This hunk removes the GLOBAL_TX_READY_LOCK (deleted earlier in the patch): instead
// of serializing ready-message ordering with a global lock, the full sorted set of
// participating shard names is passed to each wrapper's readyTransaction(...) so the
// deadlock-avoidance ordering can be handled downstream.
- private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort(
- final Set<Entry<String, TransactionContextWrapper>> txContextWrapperEntries) {
-
- final List<ThreePhaseCommitCohortProxy.CohortInfo> cohorts = new ArrayList<>(txContextWrapperEntries.size());
+ private AbstractThreePhaseCommitCohort<ActorSelection> createMultiCommitCohort() {
- synchronized (GLOBAL_TX_READY_LOCK) {
- for (Entry<String, TransactionContextWrapper> e : txContextWrapperEntries) {
- LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey());
+ final List<ThreePhaseCommitCohortProxy.CohortInfo> cohorts = new ArrayList<>(txContextWrappers.size());
// NOTE(review): the java.util.Optional qualification looks redundant — this patch
// already swaps the Guava Optional import for java.util.Optional (L16), so plain
// `Optional.of(...)` should resolve identically; confirm no other Optional is in scope.
+ final java.util.Optional<SortedSet<String>> shardNames =
+ java.util.Optional.of(new TreeSet<>(txContextWrappers.keySet()));
+ for (Entry<String, TransactionContextWrapper> e : txContextWrappers.entrySet()) {
+ LOG.debug("Tx {} Readying transaction for shard {}", getIdentifier(), e.getKey())
- final TransactionContextWrapper wrapper = e.getValue();
+ final TransactionContextWrapper wrapper = e.getValue();
- // The remote tx version is obtained the via TransactionContext which may not be available yet so
- // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the
- // TransactionContext is available.
- Supplier<Short> txVersionSupplier = () -> wrapper.getTransactionContext().getTransactionVersion();
+ // The remote tx version is obtained the via TransactionContext which may not be available yet so
+ // we pass a Supplier to dynamically obtain it. Once the ready Future is resolved the
+ // TransactionContext is available.
+ Supplier<Short> txVersionSupplier = () -> wrapper.getTransactionContext().getTransactionVersion();
// readyTransaction now receives the participating shard names (the lock-free replacement
// for the old global ordering guarantee).
- cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(), txVersionSupplier));
- }
+ cohorts.add(new ThreePhaseCommitCohortProxy.CohortInfo(wrapper.readyTransaction(shardNames),
+ txVersionSupplier));
}
return new ThreePhaseCommitCohortProxy(txContextFactory.getActorContext(), cohorts, getIdentifier());