package org.opendaylight.controller.cluster.datastore.persisted;
import static com.google.common.base.Verify.verifyNotNull;
import static com.google.common.math.IntMath.ceilingPowerOfTwo;
import static java.util.Objects.requireNonNull;

import com.google.common.annotations.Beta;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.io.ByteStreams;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.ObjectOutput;
import java.io.StreamCorruptedException;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map.Entry;
import org.eclipse.jdt.annotation.NonNull;
import org.opendaylight.controller.cluster.access.concepts.TransactionIdentifier;
import org.opendaylight.controller.cluster.datastore.persisted.DataTreeCandidateInputOutput.DataTreeCandidateWithVersion;
import org.opendaylight.controller.cluster.io.ChunkedByteArray;
import org.opendaylight.controller.cluster.io.ChunkedOutputStream;
import org.opendaylight.controller.cluster.raft.protobuff.client.messages.IdentifiablePayload;
import org.opendaylight.yangtools.concepts.Either;
import org.opendaylight.yangtools.yang.data.api.schema.stream.ReusableStreamReceiver;
import org.opendaylight.yangtools.yang.data.impl.schema.ReusableImmutableNormalizedNodeStreamWriter;
import org.opendaylight.yangtools.yang.data.tree.api.DataTreeCandidate;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
private static final Logger LOG = LoggerFactory.getLogger(CommitTransactionPayload.class);
private static final long serialVersionUID = 1L;
- private volatile Entry<TransactionIdentifier, DataTreeCandidate> candidate = null;
+ private static final int MAX_ARRAY_SIZE = ceilingPowerOfTwo(Integer.getInteger(
+ "org.opendaylight.controller.cluster.datastore.persisted.max-array-size", 256 * 1024));
- CommitTransactionPayload() {
+ private volatile Entry<TransactionIdentifier, DataTreeCandidateWithVersion> candidate = null;
// Package-private on purpose: instances are created only via the create() factory methods,
// which pick the Simple or Chunked subclass based on serialized size.
CommitTransactionPayload() {
    // hidden on purpose
}
- public static CommitTransactionPayload create(final TransactionIdentifier transactionId,
- final DataTreeCandidate candidate, final int initialSerializedBufferCapacity) throws IOException {
-
- final ChunkedOutputStream cos = new ChunkedOutputStream(initialSerializedBufferCapacity);
+ public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
+ final DataTreeCandidate candidate, final PayloadVersion version, final int initialSerializedBufferCapacity)
+ throws IOException {
+ final ChunkedOutputStream cos = new ChunkedOutputStream(initialSerializedBufferCapacity, MAX_ARRAY_SIZE);
try (DataOutputStream dos = new DataOutputStream(cos)) {
transactionId.writeTo(dos);
- DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, candidate);
+ DataTreeCandidateInputOutput.writeDataTreeCandidate(dos, version, candidate);
}
- final Variant<byte[], ChunkedByteArray> source = cos.toVariant();
+ final Either<byte[], ChunkedByteArray> source = cos.toVariant();
LOG.debug("Initial buffer capacity {}, actual serialized size {}", initialSerializedBufferCapacity, cos.size());
return source.isFirst() ? new Simple(source.getFirst()) : new Chunked(source.getSecond());
}
@VisibleForTesting
- public static CommitTransactionPayload create(final TransactionIdentifier transactionId,
+ public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
+ final DataTreeCandidate candidate, final PayloadVersion version) throws IOException {
+ return create(transactionId, candidate, version, 512);
+ }
+
+ @VisibleForTesting
+ public static @NonNull CommitTransactionPayload create(final TransactionIdentifier transactionId,
final DataTreeCandidate candidate) throws IOException {
- return create(transactionId, candidate, 512);
+ return create(transactionId, candidate, PayloadVersion.current());
}
- public Entry<TransactionIdentifier, DataTreeCandidate> getCandidate() throws IOException {
- Entry<TransactionIdentifier, DataTreeCandidate> localCandidate = candidate;
+ public @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate() throws IOException {
+ Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = candidate;
if (localCandidate == null) {
synchronized (this) {
localCandidate = candidate;
return localCandidate;
}
- public final Entry<TransactionIdentifier, DataTreeCandidate> getCandidate(
+ public final @NonNull Entry<TransactionIdentifier, DataTreeCandidateWithVersion> getCandidate(
final ReusableStreamReceiver receiver) throws IOException {
final DataInput in = newDataInput();
return new SimpleImmutableEntry<>(TransactionIdentifier.readFrom(in),
DataTreeCandidateInputOutput.readDataTreeCandidate(in, receiver));
}
+ @Override
public TransactionIdentifier getIdentifier() {
try {
return getCandidate().getKey();
}
}
+ /**
+ * The cached candidate needs to be cleared after it is done applying to the DataTree, otherwise it would be keeping
+ * deserialized in memory which are not needed anymore leading to wasted memory. This lets the payload know that
+ * this was the last time the candidate was needed ant it is safe to be cleared.
+ */
+ public Entry<TransactionIdentifier, DataTreeCandidateWithVersion> acquireCandidate() throws IOException {
+ final Entry<TransactionIdentifier, DataTreeCandidateWithVersion> localCandidate = getCandidate();
+ candidate = null;
+ return localCandidate;
+ }
+
+ @Override
+ public final String toString() {
+ final var helper = MoreObjects.toStringHelper(this);
+ final var localCandidate = candidate;
+ if (localCandidate != null) {
+ helper.add("identifier", candidate.getKey());
+ }
+ return helper.add("size", size()).toString();
+ }
+
// Writes this payload's serialized bytes to the given output; implemented by the
// Simple/Chunked subclasses (see create()). Presumably invoked during payload
// serialization — confirm against the enclosing class's writeExternal path.
abstract void writeBytes(ObjectOutput out) throws IOException;

// Returns a DataInput over the serialized bytes, used by getCandidate(ReusableStreamReceiver)
// to deserialize the transaction identifier and candidate.
abstract DataInput newDataInput();