import io.atomix.storage.journal.StorageException.TooLarge;
import io.atomix.storage.journal.index.JournalIndex;
import io.netty.buffer.Unpooled;
+import java.io.EOFException;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import org.eclipse.jdt.annotation.NonNull;
final var bytes = Unpooled.wrappedBuffer(diskEntry.position(HEADER_BYTES));
try {
mapper.objectToBytes(entry, bytes);
- } catch (IOException e) {
+ } catch (EOFException e) {
// We ran out of buffer space: let's decide whose fault it is:
if (writeLimit == maxEntrySize) {
// - it is the entry and/or mapper. This is not exactly accurate, as there may be other serialization
// - it is us, as we do not have the capacity to hold maxEntrySize bytes
LOG.trace("Tail serialization with {} bytes available failed", writeLimit, e);
return null;
+ } catch (IOException e) {
+ throw new StorageException(e);
}
// Determine length, trim distEntry and compute checksum. We are okay with computeChecksum() consuming