X-Git-Url: https://git.opendaylight.org/gerrit/gitweb?p=controller.git;a=blobdiff_plain;f=opendaylight%2Fmd-sal%2Fsal-distributed-datastore%2Fsrc%2Fmain%2Fjava%2Forg%2Fopendaylight%2Fcontroller%2Fcluster%2Fdatastore%2FShardSnapshotCohort.java;h=c7bc20f7546599876d4f99a9ca07fc174ed836ed;hp=600509a26b87b480156f4baf843f2b887ed4f655;hb=e7e69069ae5ecaacc9ea0e47cb40cdf68237d636;hpb=4c2eb5190241306df418c45253a914d06eb6815b

diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java
index 600509a26b..c7bc20f754 100644
--- a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java
+++ b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardSnapshotCohort.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
+ * Copyright (c) 2015, 2017 Brocade Communications Systems, Inc. and others. All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -7,15 +7,27 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
+import static java.util.Objects.requireNonNull;
+
+import akka.actor.ActorContext;
 import akka.actor.ActorRef;
-import java.util.concurrent.ExecutionException;
-import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
-import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import com.google.common.io.ByteSource;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.OutputStream;
+import java.util.Optional;
+import org.opendaylight.controller.cluster.access.concepts.ClientIdentifier;
+import org.opendaylight.controller.cluster.access.concepts.FrontendIdentifier;
+import org.opendaylight.controller.cluster.access.concepts.FrontendType;
+import org.opendaylight.controller.cluster.access.concepts.LocalHistoryIdentifier;
+import org.opendaylight.controller.cluster.access.concepts.MemberName;
+import org.opendaylight.controller.cluster.datastore.actors.ShardSnapshotActor;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardDataTreeSnapshot;
+import org.opendaylight.controller.cluster.datastore.persisted.ShardSnapshotState;
+import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
 import org.opendaylight.controller.cluster.raft.RaftActorSnapshotCohort;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot;
+import org.opendaylight.controller.cluster.raft.persisted.Snapshot.State;
 import org.slf4j.Logger;
 
 /**
@@ -23,41 +35,58 @@ import org.slf4j.Logger;
  *
  * @author Thomas Pantelis
  */
-class ShardSnapshotCohort implements RaftActorSnapshotCohort {
-
-    private static final YangInstanceIdentifier DATASTORE_ROOT = YangInstanceIdentifier.builder().build();
+final class ShardSnapshotCohort implements RaftActorSnapshotCohort {
+    private static final FrontendType SNAPSHOT_APPLY = FrontendType.forName("snapshot-apply");
 
-    private int createSnapshotTransactionCounter;
-    private final ShardTransactionActorFactory transactionActorFactory;
+    private final InputOutputStreamFactory streamFactory;
+    private final ActorRef snapshotActor;
     private final ShardDataTree store;
-    private final Logger log;
     private final String logId;
+    private final Logger log;
 
-    ShardSnapshotCohort(ShardTransactionActorFactory transactionActorFactory, ShardDataTree store,
-            Logger log, String logId) {
-        this.transactionActorFactory = transactionActorFactory;
-        this.store = Preconditions.checkNotNull(store);
+    ShardSnapshotCohort(final InputOutputStreamFactory streamFactory, final LocalHistoryIdentifier applyHistoryId,
+            final ActorRef snapshotActor, final ShardDataTree store, final Logger log, final String logId) {
+        this.streamFactory = requireNonNull(streamFactory);
+        this.snapshotActor = requireNonNull(snapshotActor);
+        this.store = requireNonNull(store);
         this.log = log;
         this.logId = logId;
     }
 
-    @Override
-    public void createSnapshot(ActorRef actorRef) {
-        // Create a transaction actor. We are really going to treat the transaction as a worker
-        // so that this actor does not get block building the snapshot. THe transaction actor will
-        // after processing the CreateSnapshot message.
+    static ShardSnapshotCohort create(final ActorContext actorContext, final MemberName memberName,
+            final ShardDataTree store, final Logger log, final String logId, final DatastoreContext context) {
+        final LocalHistoryIdentifier applyHistoryId = new LocalHistoryIdentifier(ClientIdentifier.create(
+            FrontendIdentifier.create(memberName, SNAPSHOT_APPLY), 0), 0);
+        final String snapshotActorName = "shard-" + memberName.getName() + ':' + "snapshot-read";
+
+        final InputOutputStreamFactory streamFactory = context.isUseLz4Compression()
+                ? InputOutputStreamFactory.lz4("256KB") : InputOutputStreamFactory.simple();
+        // Create a snapshot actor. This actor will act as a worker to offload snapshot serialization for all
+        // requests.
+        final ActorRef snapshotActor = actorContext.actorOf(ShardSnapshotActor.props(streamFactory),
+                snapshotActorName);
 
-        ShardTransactionIdentifier transactionID = new ShardTransactionIdentifier(
-                "createSnapshot" + ++createSnapshotTransactionCounter);
+        return new ShardSnapshotCohort(streamFactory, applyHistoryId, snapshotActor, store, log, logId);
+    }
 
-        ActorRef createSnapshotTransaction = transactionActorFactory.newShardTransaction(
-                TransactionProxy.TransactionType.READ_ONLY, transactionID, "", DataStoreVersions.CURRENT_VERSION);
+    @Override
+    public void createSnapshot(final ActorRef actorRef, final Optional<OutputStream> installSnapshotStream) {
+        // Forward the request to the snapshot actor
+        final ShardDataTreeSnapshot snapshot = store.takeStateSnapshot();
+        log.debug("{}: requesting serialization of snapshot {}", logId, snapshot);
 
-        createSnapshotTransaction.tell(CreateSnapshot.INSTANCE, actorRef);
+        ShardSnapshotActor.requestSnapshot(snapshotActor, snapshot, installSnapshotStream, actorRef);
     }
 
     @Override
-    public void applySnapshot(byte[] snapshotBytes) {
+    @SuppressWarnings("checkstyle:IllegalCatch")
+    public void applySnapshot(final Snapshot.State snapshotState) {
+        if (!(snapshotState instanceof ShardSnapshotState)) {
+            log.debug("{}: applySnapshot ignoring snapshot: {}", logId, snapshotState);
+        }
+
+        final ShardDataTreeSnapshot snapshot = ((ShardSnapshotState)snapshotState).getSnapshot();
+
         // Since this will be done only on Recovery or when this actor is a Follower
         // we can safely commit everything in here. We not need to worry about event notifications
         // as they would have already been disabled on the follower
@@ -65,28 +94,19 @@ class ShardSnapshotCohort implements RaftActorSnapshotCohort {
         log.info("{}: Applying snapshot", logId);
 
         try {
-            ReadWriteShardDataTreeTransaction transaction = store.newReadWriteTransaction("snapshot-" + logId, null);
-
-            NormalizedNode<?, ?> node = SerializationUtils.deserializeNormalizedNode(snapshotBytes);
-
-            // delete everything first
-            transaction.getSnapshot().delete(DATASTORE_ROOT);
-
-            // Add everything from the remote node back
-            transaction.getSnapshot().write(DATASTORE_ROOT, node);
-            syncCommitTransaction(transaction);
-        } catch (InterruptedException | ExecutionException e) {
-            log.error("{}: An exception occurred when applying snapshot", logId, e);
-        } finally {
-            log.info("{}: Done applying snapshot", logId);
+            store.applySnapshot(snapshot);
+        } catch (Exception e) {
+            log.error("{}: Failed to apply snapshot {}", logId, snapshot, e);
+            return;
         }
 
+        log.info("{}: Done applying snapshot", logId);
     }
 
-    void syncCommitTransaction(final ReadWriteShardDataTreeTransaction transaction)
-            throws ExecutionException, InterruptedException {
-        ShardDataTreeCohort commitCohort = store.finishTransaction(transaction);
-        commitCohort.preCommit().get();
-        commitCohort.commit().get();
+    @Override
+    public State deserializeSnapshot(final ByteSource snapshotBytes) throws IOException {
+        try (ObjectInputStream in = new ObjectInputStream(streamFactory.createInputStream(snapshotBytes))) {
+            return ShardDataTreeSnapshot.deserialize(in);
+        }
+    }
 }
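
Note: the sketch below is not part of the OpenDaylight sources. It only illustrates the serialize/deserialize round trip that the new deserializeSnapshot() implementation relies on, assuming the uncompressed (InputOutputStreamFactory.simple()) case where the ByteSource is read back through a plain ObjectInputStream; with isUseLz4Compression() enabled the streams would additionally be wrapped for LZ4 framing. The SnapshotRoundTrip and SamplePayload names are hypothetical stand-ins for the real ShardDataTreeSnapshot machinery.

// Minimal, self-contained sketch (JDK + Guava only), not ODL code.
import com.google.common.io.ByteSource;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

public final class SnapshotRoundTrip {
    // Hypothetical stand-in for the serialized snapshot payload.
    private static final class SamplePayload implements Serializable {
        private static final long serialVersionUID = 1L;
        final String body;

        SamplePayload(final String body) {
            this.body = body;
        }
    }

    public static void main(final String[] args) throws IOException, ClassNotFoundException {
        // Serialize the payload to a byte[], as a snapshot worker would do off the main actor thread.
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(new SamplePayload("snapshot-state"));
        }

        // Hand the serialized form around as a ByteSource, the type deserializeSnapshot() accepts.
        final ByteSource source = ByteSource.wrap(bytes.toByteArray());

        // Read it back the same way the uncompressed path does: open the ByteSource
        // and wrap its stream in an ObjectInputStream, then rebuild the object.
        try (ObjectInputStream in = new ObjectInputStream(source.openStream())) {
            final SamplePayload restored = (SamplePayload) in.readObject();
            System.out.println("restored: " + restored.body);
        }
    }
}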