X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatabroker%2Factors%2Fdds%2FAbstractDataStoreClientBehavior.java;h=7187f83a1ac060d41341c110181877b9535b2985;hp=3dc4dbf1469d989c05800bce57d3a175bd2e76cd;hb=99f80f27bee37bb23e345420bf14bb7bb4793c28;hpb=b4d95acff78952020e9fbde4372d13b461fd7469

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
index 3dc4dbf146..7187f83a1a 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/databroker/actors/dds/AbstractDataStoreClientBehavior.java
@@ -16,11 +16,15 @@ import java.util.Collection;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
-import org.opendaylight.controller.cluster.access.client.BackendInfoResolver;
+import java.util.concurrent.locks.StampedLock;
+import java.util.stream.Stream;
 import org.opendaylight.controller.cluster.access.client.ClientActorBehavior;
 import org.opendaylight.controller.cluster.access.client.ClientActorContext;
 import org.opendaylight.controller.cluster.access.client.ConnectedClientConnection;
+import org.opendaylight.controller.cluster.access.client.ConnectionEntry;
+import org.opendaylight.controller.cluster.access.client.ReconnectForwarder;
 import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.datastore.utils.ActorUtils;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -62,12 +66,13 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior
     private final Map<LocalHistoryIdentifier, ClientLocalHistory> histories = new ConcurrentHashMap<>();
     private final AtomicLong nextHistoryId = new AtomicLong(1);
+    private final StampedLock lock = new StampedLock();
     private final SingleClientHistory singleHistory;
 
     private volatile Throwable aborted;
 
     AbstractDataStoreClientBehavior(final ClientActorContext context,
-            final BackendInfoResolver<ShardBackendInfo> resolver) {
+            final AbstractShardBackendResolver resolver) {
         super(context, resolver);
         singleHistory = new SingleClientHistory(this, new LocalHistoryIdentifier(getIdentifier(), 0));
     }
@@ -89,14 +94,19 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior
 currentBehavior) {
@@ -121,6 +131,8 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior
 newConn) {
+        final long stamp = lock.writeLock();
+
         // Step 1: Freeze all AbstractProxyHistory instances pointing to that shard. This indirectly means that no
         //         further TransactionProxies can be created and we can safely traverse maps without risking
         //         missing an entry
@@ -130,25 +142,33 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior
-        return previousEntries -> {
-            try {
-                // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
-                //         the non-throttling interface to the connection, hence we use a wrapper consumer
-                for (HistoryReconnectCohort c : cohorts) {
-                    c.replaySuccessfulRequests(previousEntries);
-                }
+        return previousEntries -> finishReconnect(newConn, stamp, cohorts, previousEntries);
+    }
 
-                // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
-                //         requests will be immediately sent to it and requests being sent concurrently will get
-                //         forwarded once they hit the new connection.
-                return BouncingReconnectForwarder.forCohorts(newConn, cohorts);
-            } finally {
+    private ReconnectForwarder finishReconnect(final ConnectedClientConnection<ShardBackendInfo> newConn,
+            final long stamp, final Collection<HistoryReconnectCohort> cohorts,
+            final Collection<ConnectionEntry> previousEntries) {
+        try {
+            // Step 2: Collect previous successful requests from the cohorts. We do not want to expose
+            //         the non-throttling interface to the connection, hence we use a wrapper consumer
+            for (HistoryReconnectCohort c : cohorts) {
+                c.replayRequests(previousEntries);
+            }
+
+            // Step 3: Install a forwarder, which will forward requests back to affected cohorts. Any outstanding
+            //         requests will be immediately sent to it and requests being sent concurrently will get
+            //         forwarded once they hit the new connection.
+            return BouncingReconnectForwarder.forCohorts(newConn, cohorts);
+        } finally {
+            try {
                 // Step 4: Complete switchover of the connection. The cohorts can resume normal operations.
                 for (HistoryReconnectCohort c : cohorts) {
                     c.close();
                 }
+            } finally {
+                lock.unlockWrite(stamp);
             }
-        };
+        }
     }
 
     private static void startReconnect(final AbstractClientHistory history,
@@ -170,19 +190,22 @@ abstract class AbstractDataStoreClientBehavior extends ClientActorBehavior
 resolveAllShards();
+
+    final ActorUtils actorUtils() {
+        return ((AbstractShardBackendResolver) resolver()).actorUtils();
+    }
 }
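
The refactoring above hinges on a hand-off that is easy to miss when reading the hunks in isolation: the StampedLock write stamp is taken up front in the connection-up path, before Step 1 freezes the proxy histories, carried into finishReconnect() together with the reconnect cohorts, and only released there, inside a nested try/finally, so the write lock is freed even if replaying requests or closing a cohort throws (Steps 2-4). The sketch below illustrates just that pattern in plain Java. It is a minimal illustration under assumed names, not the OpenDaylight API: Cohort, Request and Forwarder are hypothetical stand-ins for HistoryReconnectCohort, ConnectionEntry and ReconnectForwarder, and where the real code installs BouncingReconnectForwarder.forCohorts() the sketch returns a no-op lambda.

    import java.util.Collection;
    import java.util.List;
    import java.util.concurrent.locks.StampedLock;
    import java.util.function.Function;

    // Minimal sketch of the lock hand-off used in the patch: the write stamp taken when
    // the reconnect starts is carried into the callback and released there, with nested
    // try/finally blocks guaranteeing the unlock even if replay or close() throws.
    // Cohort, Request and Forwarder are hypothetical stand-ins, not ODL classes.
    final class ReconnectSketch {
        interface Request { }

        interface Cohort {
            void replayRequests(Collection<Request> previousRequests);
            void close();
        }

        interface Forwarder {
            void forward(Request request);
        }

        private final StampedLock lock = new StampedLock();

        // connectionUp() analogue: take the write lock (the patch then freezes its
        // histories, Step 1) and hand back a callback that completes the switchover
        // once the previously queued requests are known.
        Function<Collection<Request>, Forwarder> startReconnect(final List<Cohort> cohorts) {
            final long stamp = lock.writeLock();
            return previousRequests -> finishReconnect(stamp, cohorts, previousRequests);
        }

        // Steps 2-4 analogue: replay the requests, install a forwarder, then close the
        // cohorts and release the write lock acquired in startReconnect().
        private Forwarder finishReconnect(final long stamp, final List<Cohort> cohorts,
                final Collection<Request> previousRequests) {
            try {
                for (Cohort c : cohorts) {
                    c.replayRequests(previousRequests);
                }
                // A trivial forwarder; the real code returns BouncingReconnectForwarder here.
                return request -> { /* forward to the new connection */ };
            } finally {
                try {
                    for (Cohort c : cohorts) {
                        c.close();
                    }
                } finally {
                    lock.unlockWrite(stamp);
                }
            }
        }
    }

Keeping the unlock in its own inner finally mirrors Step 4 of the patch: a cohort's close() may fail, but the stamp taken at the start of the reconnect must still be returned to the StampedLock, otherwise the write lock would never be released and later operations taking the same lock would block indefinitely.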