X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatabroker%2Factors%2Fdds%2FAbstractClientHistory.java;h=47283843d478401a2b72275e6ea87ca3347a0fcc;hb=823bd74f34ee1c651f1f90daeef386a35c68d431;hp=8ab58e410aa419a3f0711188f6b5af29fe2177a9;hpb=db9a673c114febc785fbd324947ac2c3e3095d06;p=controller.git

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java
index 8ab58e410a..47283843d4 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractClientHistory.java
@@ -14,9 +14,11 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLongFieldUpdater;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.concurrent.locks.StampedLock;
 import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.access.client.AbstractClientConnection;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
+import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
 import org.opendaylight.controller.cluster.access.client.InversibleLockException;
 import org.opendaylight.controller.cluster.access.commands.CreateLocalHistoryRequest;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
@@ -48,11 +50,14 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
             AtomicReferenceFieldUpdater.newUpdater(AbstractClientHistory.class, State.class, "state");
 
     @GuardedBy("this")
-    private final Map<TransactionIdentifier, ClientTransaction> openTransactions = new HashMap<>();
+    private final Map<TransactionIdentifier, AbstractClientHandle<?>> openTransactions = new HashMap<>();
     @GuardedBy("this")
     private final Map<TransactionIdentifier, AbstractTransactionCommitCohort> readyTransactions = new HashMap<>();
 
+    @GuardedBy("lock")
     private final Map<Long, ProxyHistory> histories = new ConcurrentHashMap<>();
+    private final StampedLock lock = new StampedLock();
+
     private final AbstractDataStoreClientBehavior client;
     private final LocalHistoryIdentifier identifier;
 
@@ -98,7 +103,7 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
         LOG.debug("Force-closing history {}", getIdentifier(), cause);
 
         synchronized (this) {
-            for (ClientTransaction t : openTransactions.values()) {
+            for (AbstractClientHandle<?> t : openTransactions.values()) {
                 t.localAbort(cause);
             }
             openTransactions.clear();
@@ -112,6 +117,7 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
      *
      * @throws InversibleLockException if the shard is being reconnected
      */
+    @GuardedBy("lock")
    private ProxyHistory createHistoryProxy(final Long shard) {
         final AbstractClientConnection connection = client.getConnection(shard);
         final LocalHistoryIdentifier proxyId = new LocalHistoryIdentifier(identifier.getClientId(),
@@ -135,32 +141,48 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
         LOG.debug("Create history response {}", response);
     }
 
-    final AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId, final Long shard) {
+    private ProxyHistory ensureHistoryProxy(final TransactionIdentifier transactionId, final Long shard) {
         while (true) {
-            final ProxyHistory history;
             try {
-                history = histories.computeIfAbsent(shard, this::createHistoryProxy);
+                // Short-lived lock to ensure exclusion of createHistoryProxy and the lookup phase in startReconnect,
+                // see comments in startReconnect() for details.
+                final long stamp = lock.readLock();
+                try {
+                    return histories.computeIfAbsent(shard, this::createHistoryProxy);
+                } finally {
+                    lock.unlockRead(stamp);
+                }
             } catch (InversibleLockException e) {
                 LOG.trace("Waiting for transaction {} shard {} connection to resolve", transactionId, shard);
                 e.awaitResolution();
                 LOG.trace("Retrying transaction {} shard {} connection", transactionId, shard);
-                continue;
             }
+        }
+    }
 
-            return history.createTransactionProxy(transactionId);
+    final AbstractProxyTransaction createSnapshotProxy(final TransactionIdentifier transactionId, final Long shard) {
+        return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, true);
+    }
+
+    final AbstractProxyTransaction createTransactionProxy(final TransactionIdentifier transactionId, final Long shard) {
+        return ensureHistoryProxy(transactionId, shard).createTransactionProxy(transactionId, false);
+    }
+
+    private void checkNotClosed() {
+        if (state == State.CLOSED) {
+            throw new TransactionChainClosedException(String.format("Local history %s is closed", identifier));
         }
     }
 
     /**
-     * Allocate a {@link ClientTransaction}.
+     * Allocate a new {@link ClientTransaction}.
      *
      * @return A new {@link ClientTransaction}
     * @throws TransactionChainClosedException if this history is closed
+     * @throws IllegalStateException if a previous dependent transaction has not been closed
      */
     public final ClientTransaction createTransaction() {
-        if (state == State.CLOSED) {
-            throw new TransactionChainClosedException(String.format("Local history %s is closed", identifier));
-        }
+        checkNotClosed();
 
         synchronized (this) {
             final ClientTransaction ret = doCreateTransaction();
@@ -169,6 +191,26 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
         }
     }
 
+    /**
+     * Create a new {@link ClientSnapshot}.
+     *
+     * @return A new {@link ClientSnapshot}
+     * @throws TransactionChainClosedException if this history is closed
+     * @throws IllegalStateException if a previous dependent transaction has not been closed
+     */
+    public final ClientSnapshot takeSnapshot() {
+        checkNotClosed();
+
+        synchronized (this) {
+            final ClientSnapshot ret = doCreateSnapshot();
+            openTransactions.put(ret.getIdentifier(), ret);
+            return ret;
+        }
+    }
+
+    @GuardedBy("this")
+    abstract ClientSnapshot doCreateSnapshot();
+
     @GuardedBy("this")
     abstract ClientTransaction doCreateTransaction();
 
@@ -178,10 +220,12 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
      * @param txId Transaction identifier
     * @param cohort Transaction commit cohort
      */
-    synchronized AbstractTransactionCommitCohort onTransactionReady(final TransactionIdentifier txId,
+    synchronized AbstractTransactionCommitCohort onTransactionReady(final ClientTransaction tx,
             final AbstractTransactionCommitCohort cohort) {
-        final ClientTransaction tx = openTransactions.remove(txId);
-        Preconditions.checkState(tx != null, "Failed to find open transaction for %s", txId);
+        final TransactionIdentifier txId = tx.getIdentifier();
+        if (openTransactions.remove(txId) == null) {
+            LOG.warn("Transaction {} not recorded, proceeding with readiness", txId);
+        }
 
         final AbstractTransactionCommitCohort previous = readyTransactions.putIfAbsent(txId, cohort);
         Preconditions.checkState(previous == null, "Duplicate cohort %s for transaction %s, already have %s",
@@ -195,11 +239,11 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
     * Callback invoked from {@link ClientTransaction} when a child transaction has been aborted without touching
      * backend.
      *
-     * @param txId transaction identifier
+     * @param snapshot transaction identifier
      */
-    synchronized void onTransactionAbort(final TransactionIdentifier txId) {
-        if (openTransactions.remove(txId) == null) {
-            LOG.warn("Could not find aborting transaction {}", txId);
+    synchronized void onTransactionAbort(final AbstractClientHandle<?> snapshot) {
+        if (openTransactions.remove(snapshot.getIdentifier()) == null) {
+            LOG.warn("Could not find aborting transaction {}", snapshot.getIdentifier());
         }
     }
 
@@ -216,7 +260,26 @@ abstract class AbstractClientHistory extends LocalAbortable implements Identifia
     }
 
     HistoryReconnectCohort startReconnect(final ConnectedClientConnection newConn) {
-        final ProxyHistory oldProxy = histories.get(newConn.cookie());
+        /*
+         * This looks ugly and unusual and there is a reason for that, as the locking involved is in multiple places.
+         *
+         * We need to make sure that a new proxy is not created while we are reconnecting, which is partially satisfied
+         * by client.getConnection() throwing InversibleLockException by the time this method is invoked. That does
+         * not cover the case when createHistoryProxy() has already acquired the connection, but has not yet populated
+         * the history map.
+         *
+         * Hence we need to make sure no potential computation is happening concurrently with us looking at the history
+         * map. Once we have performed that lookup, though, we can release the lock immediately, as all creation
+         * requests are established to happen either before or after the reconnect attempt.
+         */
+        final ProxyHistory oldProxy;
+        final long stamp = lock.writeLock();
+        try {
+            oldProxy = histories.get(newConn.cookie());
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+
         if (oldProxy == null) {
             return null;
         }
@@ -229,8 +292,8 @@
             }
 
             @Override
-            void replaySuccessfulRequests() {
-                proxy.replaySuccessfulRequests();
+            void replaySuccessfulRequests(final Iterable<ConnectionEntry> previousEntries) {
+                proxy.replaySuccessfulRequests(previousEntries);
             }
 
             @Override
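
The locking protocol the patch introduces can be summarized outside the OpenDaylight code base as follows. This is a minimal, self-contained sketch, not the project's API: ProxyMapSketch, ensureProxy() and reconnectLookup() are hypothetical names standing in for ensureHistoryProxy() and the lookup phase of startReconnect(), and a plain Map<Long, String> stands in for the histories map of ProxyHistory instances. The point it illustrates is the one made in the startReconnect() comment above: creation holds the StampedLock read lock for the duration of computeIfAbsent(), and the reconnect path takes the write lock for its lookup, so the two phases can never interleave.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.StampedLock;

final class ProxyMapSketch {
    private final Map<Long, String> histories = new ConcurrentHashMap<>();
    private final StampedLock lock = new StampedLock();

    // Mirrors ensureHistoryProxy(): hold the read lock only around computeIfAbsent(),
    // so that proxy creation cannot interleave with the write-locked lookup below.
    String ensureProxy(final Long shard) {
        final long stamp = lock.readLock();
        try {
            return histories.computeIfAbsent(shard, s -> "proxy-" + s);
        } finally {
            lock.unlockRead(stamp);
        }
    }

    // Mirrors the lookup phase of startReconnect(): the write lock excludes any in-flight
    // computeIfAbsent(), and is dropped as soon as the lookup completes.
    String reconnectLookup(final Long shard) {
        final long stamp = lock.writeLock();
        try {
            return histories.get(shard);
        } finally {
            lock.unlockWrite(stamp);
        }
    }
}

A read lock is enough on the creation side because concurrent computeIfAbsent() calls on a ConcurrentHashMap are already safe against each other; the only thing they must exclude is the reconnect lookup, which is why that single path takes the write lock.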
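The retry loop that ensureHistoryProxy() wraps around the locked lookup has a simple shape: attempt the lookup, and if the connection is being reconnected, wait for the reconnect to resolve and try again. The sketch below shows that shape in isolation under stated assumptions: ReconnectInProgressException, attempt() and lookupWithRetry() are hypothetical stand-ins for InversibleLockException, the locked computeIfAbsent() call and the while (true) loop in the diff; the real InversibleLockException resolves differently internally.

import java.util.concurrent.CountDownLatch;

final class RetryingLookupSketch {
    // Stand-in for InversibleLockException: thrown while a reconnect is in progress and
    // resolved once the reconnect completes.
    static final class ReconnectInProgressException extends RuntimeException {
        private final CountDownLatch resolved = new CountDownLatch(1);

        void awaitResolution() throws InterruptedException {
            resolved.await();
        }

        void markResolved() {
            resolved.countDown();
        }
    }

    interface Lookup<T> {
        T attempt() throws ReconnectInProgressException;
    }

    // Mirrors the while (true) loop in ensureHistoryProxy(): retry the lookup, blocking on
    // awaitResolution() whenever a reconnect interrupts it.
    static <T> T lookupWithRetry(final Lookup<T> lookup) throws InterruptedException {
        while (true) {
            try {
                return lookup.attempt();
            } catch (ReconnectInProgressException e) {
                e.awaitResolution();
            }
        }
    }
}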