From f429a35cbc01ed5e37b730f5b451aaa9cec213de Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 03:42:25 +0300 Subject: [PATCH 01/36] initial commit --- .../AbstractInMemoryDao.java | 60 +++++++++++++++++++ .../kovalchukvladislav/MemorySegmentDao.java | 33 ++++++++++ .../MemorySegmentDaoFactory.java | 43 +++++++++++++ 3 files changed, 136 insertions(+) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java create mode 100644 src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java new file mode 100644 index 000000000..41abedf81 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -0,0 +1,60 @@ +package ru.vk.itmo.kovalchukvladislav; + +import java.io.IOException; +import java.util.Comparator; +import java.util.Iterator; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; + +import ru.vk.itmo.Dao; +import ru.vk.itmo.Entry; + +public abstract class AbstractInMemoryDao> implements Dao { + private final ConcurrentNavigableMap dao; + + protected AbstractInMemoryDao(Comparator comparator) { + dao = new ConcurrentSkipListMap<>(comparator); + } + + @Override + public Iterator get(D from, D to) { + if (from == null && to == null) { + return all(); + } else if (from == null) { + return allTo(to); + } else if (to == null) { + return allFrom(from); + } + return dao.subMap(from, true, to, false).values().iterator(); + } + + @Override + public E get(D key) { + return dao.get(key); + } + + @Override + public void upsert(E entry) { + dao.put(entry.key(), entry); + } + + @Override + public Iterator allFrom(D from) { + return dao.tailMap(from, false).values().iterator(); + } + + @Override + public Iterator allTo(D to) { + return dao.headMap(to, false).values().iterator(); + } + + @Override + public Iterator all() { + return dao.values().iterator(); + } + + @Override + public void flush() throws IOException { + dao.clear(); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java new file mode 100644 index 000000000..4357926a9 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -0,0 +1,33 @@ +package ru.vk.itmo.kovalchukvladislav; + +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.util.Comparator; + +import ru.vk.itmo.Entry; + +public class MemorySegmentDao extends AbstractInMemoryDao> { + private static final Comparator COMPARATOR = getComparator(); + + public MemorySegmentDao() { + super(COMPARATOR); + } + + private static Comparator getComparator() { + return (Comparator) (a, b) -> { + long diffIndex = a.mismatch(b); + if (diffIndex == -1) { + return 0; + } else if (diffIndex == a.byteSize()) { + return -1; + } else if (diffIndex == b.byteSize()) { + return 1; + } + + byte aByte = a.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + byte bByte = b.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + return Byte.compare(aByte, bByte); + }; + } +} + diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java new file mode 100644 
index 000000000..aba7d565d --- /dev/null +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -0,0 +1,43 @@ +package ru.vk.itmo.test.kovalchukvladislav; + +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + +import ru.vk.itmo.Dao; +import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.MemorySegmentDao; +import ru.vk.itmo.test.DaoFactory; + +@DaoFactory +public class MemorySegmentDaoFactory implements DaoFactory.Factory> { + private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; + private static final ValueLayout.OfByte DEFAULT_VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + + @Override + public Dao> createDao() { + return new MemorySegmentDao(); + } + + @Override + public String toString(MemorySegment memorySegment) { + if (memorySegment == null) { + return null; + } + return new String(memorySegment.toArray(DEFAULT_VALUE_LAYOUT), DEFAULT_CHARSET); + } + + @Override + public MemorySegment fromString(String data) { + if (data == null) { + return null; + } + return MemorySegment.ofArray(data.getBytes(DEFAULT_CHARSET)); + } + + @Override + public Entry fromBaseEntry(Entry baseEntry) { + return baseEntry; + } +} From fa1500735d47cd07068f5645becf063581ca8f8d Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 03:43:55 +0300 Subject: [PATCH 02/36] remove wrong flush --- .../ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 41abedf81..1b6a89d86 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,6 +1,5 @@ package ru.vk.itmo.kovalchukvladislav; -import java.io.IOException; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; @@ -52,9 +51,4 @@ public Iterator allTo(D to) { public Iterator all() { return dao.values().iterator(); } - - @Override - public void flush() throws IOException { - dao.clear(); - } } From e1651231517e8083684cacc87267f70f30ef34ad Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 03:52:22 +0300 Subject: [PATCH 03/36] add null safety --- .../ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 1b6a89d86..363356da7 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -39,11 +39,17 @@ public void upsert(E entry) { @Override public Iterator allFrom(D from) { + if (from == null) { + return all(); + } return dao.tailMap(from, false).values().iterator(); } @Override public Iterator allTo(D to) { + if (to == null) { + return all(); + } return dao.headMap(to, false).values().iterator(); } From 07959cf114030c039954352dd2ff8bebfa62cc21 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 04:13:57 +0300 Subject: [PATCH 04/36] checkstyle fix --- .../itmo/kovalchukvladislav/AbstractInMemoryDao.java | 6 +++--- .../vk/itmo/kovalchukvladislav/MemorySegmentDao.java | 10 +++++----- 
.../kovalchukvladislav/MemorySegmentDaoFactory.java | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 363356da7..c26d439df 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,13 +1,13 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.Dao; +import ru.vk.itmo.Entry; + import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; -import ru.vk.itmo.Dao; -import ru.vk.itmo.Entry; - public abstract class AbstractInMemoryDao> implements Dao { private final ConcurrentNavigableMap dao; diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index 4357926a9..12f42ce1d 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -1,11 +1,11 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.Entry; + import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.util.Comparator; -import ru.vk.itmo.Entry; - public class MemorySegmentDao extends AbstractInMemoryDao> { private static final Comparator COMPARATOR = getComparator(); @@ -24,9 +24,9 @@ private static Comparator getComparator() { return 1; } - byte aByte = a.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); - byte bByte = b.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); - return Byte.compare(aByte, bByte); + byte byteA = a.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + byte byteB = b.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + return Byte.compare(byteA, byteB); }; } } diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index aba7d565d..c63942539 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -1,15 +1,15 @@ package ru.vk.itmo.test.kovalchukvladislav; -import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; - import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.MemorySegmentDao; import ru.vk.itmo.test.DaoFactory; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; + @DaoFactory public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; From d9fb3556ebf68017e8e2e17a527848900b117a30 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 04:27:28 +0300 Subject: [PATCH 05/36] remove extra ifs & add unsafe methods --- .../AbstractInMemoryDao.java | 30 +++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index c26d439df..5f1f57b14 100644 --- 
a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -20,9 +20,9 @@ public Iterator get(D from, D to) { if (from == null && to == null) { return all(); } else if (from == null) { - return allTo(to); + return allToUnsafe(to); } else if (to == null) { - return allFrom(from); + return allFromUnsafe(from); } return dao.subMap(from, true, to, false).values().iterator(); } @@ -39,17 +39,29 @@ public void upsert(E entry) { @Override public Iterator allFrom(D from) { - if (from == null) { - return all(); - } - return dao.tailMap(from, false).values().iterator(); + return from == null ? all() : allFromUnsafe(from); + } + + /** + * Doesn't check the argument for null. Should be called only if there was a check before + * @param from NotNull lower bound of range (inclusive) + * @return entries with key >= from + */ + private Iterator allFromUnsafe(D from) { + return dao.tailMap(from, true).values().iterator(); } @Override public Iterator allTo(D to) { - if (to == null) { - return all(); - } + return to == null ? all() : allToUnsafe(to); + } + + /** + * Doesn't check the argument for null. Should be called only if there was a check before + * @param to NotNull upper bound of range (exclusive) + * @return upper bound of range (exclusive) + */ + private Iterator allToUnsafe(D to) { return dao.headMap(to, false).values().iterator(); } From d822985dbbfb7d8de9ca91b3ffa1ef8d6eadc012 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 04:32:25 +0300 Subject: [PATCH 06/36] comment fix --- .../java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 5f1f57b14..c2c1d0538 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -59,7 +59,7 @@ public Iterator allTo(D to) { /** * Doesn't check the argument for null. 
Should be called only if there was a check before * @param to NotNull upper bound of range (exclusive) - * @return upper bound of range (exclusive) + * @return entries with key < to */ private Iterator allToUnsafe(D to) { return dao.headMap(to, false).values().iterator(); From 4a264e8d33a577bb16ead1b01753714c24677b0b Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 04:35:22 +0300 Subject: [PATCH 07/36] just move constant to static --- .../ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java | 5 +++-- .../test/kovalchukvladislav/MemorySegmentDaoFactory.java | 8 ++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index 12f42ce1d..e573bb058 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -8,6 +8,7 @@ public class MemorySegmentDao extends AbstractInMemoryDao> { private static final Comparator COMPARATOR = getComparator(); + private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; public MemorySegmentDao() { super(COMPARATOR); @@ -24,8 +25,8 @@ private static Comparator getComparator() { return 1; } - byte byteA = a.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); - byte byteB = b.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + byte byteA = a.getAtIndex(VALUE_LAYOUT, diffIndex); + byte byteB = b.getAtIndex(VALUE_LAYOUT, diffIndex); return Byte.compare(byteA, byteB); }; } diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index c63942539..dd8963f94 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,8 +12,8 @@ @DaoFactory public class MemorySegmentDaoFactory implements DaoFactory.Factory> { - private static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; - private static final ValueLayout.OfByte DEFAULT_VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + private static final Charset CHARSET = StandardCharsets.UTF_8; + private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; @Override public Dao> createDao() { @@ -25,7 +25,7 @@ public String toString(MemorySegment memorySegment) { if (memorySegment == null) { return null; } - return new String(memorySegment.toArray(DEFAULT_VALUE_LAYOUT), DEFAULT_CHARSET); + return new String(memorySegment.toArray(VALUE_LAYOUT), CHARSET); } @Override @@ -33,7 +33,7 @@ public MemorySegment fromString(String data) { if (data == null) { return null; } - return MemorySegment.ofArray(data.getBytes(DEFAULT_CHARSET)); + return MemorySegment.ofArray(data.getBytes(CHARSET)); } @Override From f92dce9516bc901bb17d4fd43ecd2d12500b419c Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Sep 2023 16:12:33 +0300 Subject: [PATCH 08/36] review fix --- .../ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index c2c1d0538..d3ae8c9f5 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -24,7 
+24,7 @@ public Iterator get(D from, D to) { } else if (to == null) { return allFromUnsafe(from); } - return dao.subMap(from, true, to, false).values().iterator(); + return dao.subMap(from, to).values().iterator(); } @Override @@ -48,7 +48,7 @@ public Iterator allFrom(D from) { * @return entries with key >= from */ private Iterator allFromUnsafe(D from) { - return dao.tailMap(from, true).values().iterator(); + return dao.tailMap(from).values().iterator(); } @Override @@ -62,7 +62,7 @@ public Iterator allTo(D to) { * @return entries with key < to */ private Iterator allToUnsafe(D to) { - return dao.headMap(to, false).values().iterator(); + return dao.headMap(to).values().iterator(); } @Override From 5ac9637ce6dc5b2e4ba32ba54c8e54cd18f7d508 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 4 Oct 2023 06:07:33 +0300 Subject: [PATCH 09/36] initial commit --- .../AbstractInMemoryDao.java | 254 +++++++++++++++++- .../kovalchukvladislav/MemorySegmentDao.java | 31 ++- .../MemorySegmentSerializer.java | 14 + .../MemorySegmentDaoFactory.java | 9 +- 4 files changed, 299 insertions(+), 9 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index d3ae8c9f5..51c67767e 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,18 +1,56 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.io.UncheckedIOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +/** + * Saves state (two files) when closed in config.basePath()
directory.
+ * <p>
+ * Directory contains two files:
+ * <ul>
+ *     <li>db with sorted by key entries</li>
+ *     <li>offsets with keys offsets at db(in bytes)</li>
+ * </ul>
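+ * <p>
+ * A minimal sketch of the on-disk layout (as produced by writeValue/writeEntries below; every size field is an 8-byte long):
+ * db:      [keySize][keyBytes][valueSize][valueBytes] repeated for each entry, ordered by key
+ * offsets: one long per entry, the position of its keySize field inside db
+ * For example, entries {"a" -> "x", "bb" -> "y"} yield offsets [0, 18], since entry 0 occupies 8 + 1 + 8 + 1 = 18 bytes.
+ * <p>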
    This allows search entries using a binary search after db reading (usually less due to missing values) + */ public abstract class AbstractInMemoryDao> implements Dao { + private static final String DB_FILENAME = "db"; + private static final int SIZE_LENGTH = Long.BYTES; + private static final String OFFSETS_FILENAME = "offsets"; + + private final Path basePath; + private volatile boolean closed = false; + private volatile long[] storageOffsets = null; + private final Comparator comparator; private final ConcurrentNavigableMap dao; + private final MemorySegmentSerializer serializer; - protected AbstractInMemoryDao(Comparator comparator) { - dao = new ConcurrentSkipListMap<>(comparator); + protected AbstractInMemoryDao(Config config, + Comparator comparator, + MemorySegmentSerializer serializer) { + this.comparator = comparator; + this.serializer = serializer; + this.dao = new ConcurrentSkipListMap<>(comparator); + this.basePath = config.basePath(); } @Override @@ -29,7 +67,22 @@ public Iterator get(D from, D to) { @Override public E get(D key) { - return dao.get(key); + E e = dao.get(key); + if (e != null) { + return e; + } + E fromFile = findInStorage(key); + if (fromFile == null) { + return null; + } + E previousValue = dao.putIfAbsent(key, fromFile); + if (previousValue != null) { + // If new value was putted while we were looking for in storage, just return it + // Maybe should return previousValue, as value which was stored when method called + // But this is concurrency, there are no guarantees + return previousValue; + } + return fromFile; } @Override @@ -44,6 +97,7 @@ public Iterator allFrom(D from) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param from NotNull lower bound of range (inclusive) * @return entries with key >= from */ @@ -58,6 +112,7 @@ public Iterator allTo(D to) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param to NotNull upper bound of range (exclusive) * @return entries with key < to */ @@ -69,4 +124,197 @@ private Iterator allToUnsafe(D to) { public Iterator all() { return dao.values().iterator(); } + + @Override + public synchronized void flush() throws IOException { + long[] offsets = writeEntries(); + writeOffsets(offsets); + } + + @Override + public synchronized void close() throws IOException { + if (!closed) { + flush(); + closed = true; + } + } + + // =================================== + // Reading values + // =================================== + + private D readValue(MemorySegment memorySegment, long offset) { + long size = memorySegment.get(ValueLayout.JAVA_LONG_UNALIGNED, offset); + MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); + return serializer.toValue(valueSegment); + } + + // Return new offset + private long writeValue(D value, MemorySegment memorySegment, long offset) { + MemorySegment valueSegment = serializer.fromValue(value); + long size = valueSegment.byteSize(); + + memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, size); + MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + return offset + SIZE_LENGTH + size; + } + + // =================================== + // Reading offsets and data + // =================================== + + /** + * Read offsets from file.
+ * If the file doesn't exist, storageOffsets will be filled with an empty array and a warning message will be printed.
    + * After completion of work storageOffsets not null. + */ + private synchronized void readOffsets() { + if (storageOffsets != null) { + return; + } + File offsetsFile = basePath.resolve(OFFSETS_FILENAME).toFile(); + if (!offsetsFile.exists()) { + // Предположим что тут и далее нормальный логгер, и мы вызываем log.warning(), log.error() вместо этого + System.out.println( + "[WARN] Previous saved data in path: " + offsetsFile.getPath() + " didn't found." + + "It's ok if this storage launches first time or didn't save data before" + ); + this.storageOffsets = new long[] {}; + return; + } + + try (RandomAccessFile file = new RandomAccessFile(offsetsFile, "r"); + FileChannel channel = file.getChannel(); + Arena arena = Arena.ofConfined()) { + + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + this.storageOffsets = fileSegment.toArray(ValueLayout.OfLong.JAVA_LONG); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private E findInStorage(D key) { + if (storageOffsets == null) { + readOffsets(); + } + if (storageOffsets.length == 0) { + return null; + } + + File databaseFile = basePath.resolve(DB_FILENAME).toFile(); + if (!databaseFile.exists()) { + System.out.println( + "[ERROR] Previous saved data in path: " + databaseFile.getPath() + " didn't found, " + + "but offsets file exist." + ); + return null; + } + + try (RandomAccessFile file = new RandomAccessFile(databaseFile, "r"); + FileChannel channel = file.getChannel()) { + Arena arena = Arena.ofAuto(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + + // binary search + D foundedKey = null; + int leftOffsetIndex = -1; // offset of element <= key + int rightOffsetIndex = storageOffsets.length; // offset of element > key + + while (leftOffsetIndex + 1 < rightOffsetIndex) { + int middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; + long middleOffset = storageOffsets[middleOffsetIndex]; + + D currentKey = readValue(fileSegment, middleOffset); + int compared = comparator.compare(currentKey, key); + + if (compared < 0) { + leftOffsetIndex = middleOffsetIndex; + } else if (compared > 0) { + rightOffsetIndex = middleOffsetIndex; + } else { + leftOffsetIndex = middleOffsetIndex; + foundedKey = currentKey; + break; + } + } + if (foundedKey == null) { + // not found, element at leftOffset < key + return null; + } + + long valueOffset = storageOffsets[leftOffsetIndex] + SIZE_LENGTH + serializer.size(foundedKey); + D value = readValue(fileSegment, valueOffset); + + return serializer.createEntry(foundedKey, value); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + // =================================== + // Writing offsets and data + // =================================== + + // Return nullable offsets + + private long[] writeEntries() throws IOException { + Path resultPath = basePath.resolve(DB_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = calculateInMemoryDAOSize(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + + int i = 0; + long offset = 0; + long[] offsets = new long[dao.size()]; + for (E entry : dao.values()) { + offsets[i++] = offset; + offset = writeValue(entry.key(), fileSegment, offset); + offset = 
writeValue(entry.value(), fileSegment, offset); + } + fileSegment.load(); + return offsets; + } + } + + private void writeOffsets(long[] offsets) throws IOException { + Path resultPath = basePath.resolve(OFFSETS_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = (long) offsets.length * SIZE_LENGTH; + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + MemorySegment offsetsSegment = MemorySegment.ofArray(offsets); + MemorySegment.copy(offsetsSegment, 0, fileSegment, 0, size); + fileSegment.load(); + } + } + + // =================================== + // Some util methods + // =================================== + + private long calculateInMemoryDAOSize() { + long size = 0; + for (E entry : dao.values()) { + size += 2 * SIZE_LENGTH; + size += serializer.size(entry.key()); + size += serializer.size(entry.value()); + } + return size; + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index e573bb058..62895a614 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -1,5 +1,7 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.BaseEntry; +import ru.vk.itmo.Config; import ru.vk.itmo.Entry; import java.lang.foreign.MemorySegment; @@ -7,11 +9,12 @@ import java.util.Comparator; public class MemorySegmentDao extends AbstractInMemoryDao> { - private static final Comparator COMPARATOR = getComparator(); private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + private static final Serializer MEMORY_SEGMENT_SERIALIZER = new Serializer(); + private static final Comparator COMPARATOR = getComparator(); - public MemorySegmentDao() { - super(COMPARATOR); + public MemorySegmentDao(Config config) { + super(config, COMPARATOR, MEMORY_SEGMENT_SERIALIZER); } private static Comparator getComparator() { @@ -30,5 +33,27 @@ private static Comparator getComparator() { return Byte.compare(byteA, byteB); }; } + + private static class Serializer implements MemorySegmentSerializer> { + @Override + public MemorySegment toValue(MemorySegment input) { + return input; + } + + @Override + public MemorySegment fromValue(MemorySegment value) { + return value; + } + + @Override + public long size(MemorySegment value) { + return value.byteSize(); + } + + @Override + public Entry createEntry(MemorySegment key, MemorySegment value) { + return new BaseEntry<>(key, value); + } + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java new file mode 100644 index 000000000..bb947ebed --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java @@ -0,0 +1,14 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.Entry; +import java.lang.foreign.MemorySegment; + +public interface MemorySegmentSerializer> { + D toValue(MemorySegment input); + + MemorySegment fromValue(D value); + + long size(D value); + + E createEntry(D key, D value); +} diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 
dd8963f94..6657c68aa 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -1,23 +1,26 @@ package ru.vk.itmo.test.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.MemorySegmentDao; import ru.vk.itmo.test.DaoFactory; +import java.io.IOException; import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory +@DaoFactory(stage = 2) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + @Override - public Dao> createDao() { - return new MemorySegmentDao(); + public Dao> createDao(Config config) throws IOException { + return new MemorySegmentDao(config); } @Override From 91f9e50166c36d7e2e5195dec3fb84f7441a7505 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 4 Oct 2023 21:20:00 +0530 Subject: [PATCH 10/36] initial commit --- .../AbstractInMemoryDao.java | 254 +++++++++++++++++- .../kovalchukvladislav/MemorySegmentDao.java | 31 ++- .../MemorySegmentSerializer.java | 14 + .../MemorySegmentDaoFactory.java | 9 +- 4 files changed, 299 insertions(+), 9 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index d3ae8c9f5..51c67767e 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,18 +1,56 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.io.UncheckedIOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +/** + * Saves state (two files) when closed in config.basePath()
directory.
+ * <p>
+ * Directory contains two files:
+ * <ul>
+ *     <li>db with sorted by key entries</li>
+ *     <li>offsets with keys offsets at db(in bytes)</li>
+ * </ul>
+ * <p>
    This allows search entries using a binary search after db reading (usually less due to missing values) + */ public abstract class AbstractInMemoryDao> implements Dao { + private static final String DB_FILENAME = "db"; + private static final int SIZE_LENGTH = Long.BYTES; + private static final String OFFSETS_FILENAME = "offsets"; + + private final Path basePath; + private volatile boolean closed = false; + private volatile long[] storageOffsets = null; + private final Comparator comparator; private final ConcurrentNavigableMap dao; + private final MemorySegmentSerializer serializer; - protected AbstractInMemoryDao(Comparator comparator) { - dao = new ConcurrentSkipListMap<>(comparator); + protected AbstractInMemoryDao(Config config, + Comparator comparator, + MemorySegmentSerializer serializer) { + this.comparator = comparator; + this.serializer = serializer; + this.dao = new ConcurrentSkipListMap<>(comparator); + this.basePath = config.basePath(); } @Override @@ -29,7 +67,22 @@ public Iterator get(D from, D to) { @Override public E get(D key) { - return dao.get(key); + E e = dao.get(key); + if (e != null) { + return e; + } + E fromFile = findInStorage(key); + if (fromFile == null) { + return null; + } + E previousValue = dao.putIfAbsent(key, fromFile); + if (previousValue != null) { + // If new value was putted while we were looking for in storage, just return it + // Maybe should return previousValue, as value which was stored when method called + // But this is concurrency, there are no guarantees + return previousValue; + } + return fromFile; } @Override @@ -44,6 +97,7 @@ public Iterator allFrom(D from) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param from NotNull lower bound of range (inclusive) * @return entries with key >= from */ @@ -58,6 +112,7 @@ public Iterator allTo(D to) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param to NotNull upper bound of range (exclusive) * @return entries with key < to */ @@ -69,4 +124,197 @@ private Iterator allToUnsafe(D to) { public Iterator all() { return dao.values().iterator(); } + + @Override + public synchronized void flush() throws IOException { + long[] offsets = writeEntries(); + writeOffsets(offsets); + } + + @Override + public synchronized void close() throws IOException { + if (!closed) { + flush(); + closed = true; + } + } + + // =================================== + // Reading values + // =================================== + + private D readValue(MemorySegment memorySegment, long offset) { + long size = memorySegment.get(ValueLayout.JAVA_LONG_UNALIGNED, offset); + MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); + return serializer.toValue(valueSegment); + } + + // Return new offset + private long writeValue(D value, MemorySegment memorySegment, long offset) { + MemorySegment valueSegment = serializer.fromValue(value); + long size = valueSegment.byteSize(); + + memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, size); + MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + return offset + SIZE_LENGTH + size; + } + + // =================================== + // Reading offsets and data + // =================================== + + /** + * Read offsets from file.
+ * If the file doesn't exist, storageOffsets will be filled with an empty array and a warning message will be printed.
    + * After completion of work storageOffsets not null. + */ + private synchronized void readOffsets() { + if (storageOffsets != null) { + return; + } + File offsetsFile = basePath.resolve(OFFSETS_FILENAME).toFile(); + if (!offsetsFile.exists()) { + // Предположим что тут и далее нормальный логгер, и мы вызываем log.warning(), log.error() вместо этого + System.out.println( + "[WARN] Previous saved data in path: " + offsetsFile.getPath() + " didn't found." + + "It's ok if this storage launches first time or didn't save data before" + ); + this.storageOffsets = new long[] {}; + return; + } + + try (RandomAccessFile file = new RandomAccessFile(offsetsFile, "r"); + FileChannel channel = file.getChannel(); + Arena arena = Arena.ofConfined()) { + + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + this.storageOffsets = fileSegment.toArray(ValueLayout.OfLong.JAVA_LONG); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private E findInStorage(D key) { + if (storageOffsets == null) { + readOffsets(); + } + if (storageOffsets.length == 0) { + return null; + } + + File databaseFile = basePath.resolve(DB_FILENAME).toFile(); + if (!databaseFile.exists()) { + System.out.println( + "[ERROR] Previous saved data in path: " + databaseFile.getPath() + " didn't found, " + + "but offsets file exist." + ); + return null; + } + + try (RandomAccessFile file = new RandomAccessFile(databaseFile, "r"); + FileChannel channel = file.getChannel()) { + Arena arena = Arena.ofAuto(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + + // binary search + D foundedKey = null; + int leftOffsetIndex = -1; // offset of element <= key + int rightOffsetIndex = storageOffsets.length; // offset of element > key + + while (leftOffsetIndex + 1 < rightOffsetIndex) { + int middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; + long middleOffset = storageOffsets[middleOffsetIndex]; + + D currentKey = readValue(fileSegment, middleOffset); + int compared = comparator.compare(currentKey, key); + + if (compared < 0) { + leftOffsetIndex = middleOffsetIndex; + } else if (compared > 0) { + rightOffsetIndex = middleOffsetIndex; + } else { + leftOffsetIndex = middleOffsetIndex; + foundedKey = currentKey; + break; + } + } + if (foundedKey == null) { + // not found, element at leftOffset < key + return null; + } + + long valueOffset = storageOffsets[leftOffsetIndex] + SIZE_LENGTH + serializer.size(foundedKey); + D value = readValue(fileSegment, valueOffset); + + return serializer.createEntry(foundedKey, value); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + // =================================== + // Writing offsets and data + // =================================== + + // Return nullable offsets + + private long[] writeEntries() throws IOException { + Path resultPath = basePath.resolve(DB_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = calculateInMemoryDAOSize(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + + int i = 0; + long offset = 0; + long[] offsets = new long[dao.size()]; + for (E entry : dao.values()) { + offsets[i++] = offset; + offset = writeValue(entry.key(), fileSegment, offset); + offset = 
writeValue(entry.value(), fileSegment, offset); + } + fileSegment.load(); + return offsets; + } + } + + private void writeOffsets(long[] offsets) throws IOException { + Path resultPath = basePath.resolve(OFFSETS_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = (long) offsets.length * SIZE_LENGTH; + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + MemorySegment offsetsSegment = MemorySegment.ofArray(offsets); + MemorySegment.copy(offsetsSegment, 0, fileSegment, 0, size); + fileSegment.load(); + } + } + + // =================================== + // Some util methods + // =================================== + + private long calculateInMemoryDAOSize() { + long size = 0; + for (E entry : dao.values()) { + size += 2 * SIZE_LENGTH; + size += serializer.size(entry.key()); + size += serializer.size(entry.value()); + } + return size; + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index e573bb058..62895a614 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -1,5 +1,7 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.BaseEntry; +import ru.vk.itmo.Config; import ru.vk.itmo.Entry; import java.lang.foreign.MemorySegment; @@ -7,11 +9,12 @@ import java.util.Comparator; public class MemorySegmentDao extends AbstractInMemoryDao> { - private static final Comparator COMPARATOR = getComparator(); private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + private static final Serializer MEMORY_SEGMENT_SERIALIZER = new Serializer(); + private static final Comparator COMPARATOR = getComparator(); - public MemorySegmentDao() { - super(COMPARATOR); + public MemorySegmentDao(Config config) { + super(config, COMPARATOR, MEMORY_SEGMENT_SERIALIZER); } private static Comparator getComparator() { @@ -30,5 +33,27 @@ private static Comparator getComparator() { return Byte.compare(byteA, byteB); }; } + + private static class Serializer implements MemorySegmentSerializer> { + @Override + public MemorySegment toValue(MemorySegment input) { + return input; + } + + @Override + public MemorySegment fromValue(MemorySegment value) { + return value; + } + + @Override + public long size(MemorySegment value) { + return value.byteSize(); + } + + @Override + public Entry createEntry(MemorySegment key, MemorySegment value) { + return new BaseEntry<>(key, value); + } + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java new file mode 100644 index 000000000..bb947ebed --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java @@ -0,0 +1,14 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.Entry; +import java.lang.foreign.MemorySegment; + +public interface MemorySegmentSerializer> { + D toValue(MemorySegment input); + + MemorySegment fromValue(D value); + + long size(D value); + + E createEntry(D key, D value); +} diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 
dd8963f94..6657c68aa 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -1,23 +1,26 @@ package ru.vk.itmo.test.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.MemorySegmentDao; import ru.vk.itmo.test.DaoFactory; +import java.io.IOException; import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory +@DaoFactory(stage = 2) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + @Override - public Dao> createDao() { - return new MemorySegmentDao(); + public Dao> createDao(Config config) throws IOException { + return new MemorySegmentDao(config); } @Override From 453b2908e9f27e4379ab27511fa20dc45a4f037d Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 4 Oct 2023 06:07:33 +0300 Subject: [PATCH 11/36] initial commit (cherry picked from commit 5ac9637ce6dc5b2e4ba32ba54c8e54cd18f7d508) --- .../AbstractInMemoryDao.java | 254 +++++++++++++++++- .../kovalchukvladislav/MemorySegmentDao.java | 31 ++- .../MemorySegmentSerializer.java | 14 + .../MemorySegmentDaoFactory.java | 9 +- 4 files changed, 299 insertions(+), 9 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index d3ae8c9f5..51c67767e 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,18 +1,56 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.io.UncheckedIOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +/** + * Saves state (two files) when closed in config.basePath()
directory.
+ * <p>
+ * Directory contains two files:
+ * <ul>
+ *     <li>db with sorted by key entries</li>
+ *     <li>offsets with keys offsets at db(in bytes)</li>
+ * </ul>
+ * <p>
    This allows search entries using a binary search after db reading (usually less due to missing values) + */ public abstract class AbstractInMemoryDao> implements Dao { + private static final String DB_FILENAME = "db"; + private static final int SIZE_LENGTH = Long.BYTES; + private static final String OFFSETS_FILENAME = "offsets"; + + private final Path basePath; + private volatile boolean closed = false; + private volatile long[] storageOffsets = null; + private final Comparator comparator; private final ConcurrentNavigableMap dao; + private final MemorySegmentSerializer serializer; - protected AbstractInMemoryDao(Comparator comparator) { - dao = new ConcurrentSkipListMap<>(comparator); + protected AbstractInMemoryDao(Config config, + Comparator comparator, + MemorySegmentSerializer serializer) { + this.comparator = comparator; + this.serializer = serializer; + this.dao = new ConcurrentSkipListMap<>(comparator); + this.basePath = config.basePath(); } @Override @@ -29,7 +67,22 @@ public Iterator get(D from, D to) { @Override public E get(D key) { - return dao.get(key); + E e = dao.get(key); + if (e != null) { + return e; + } + E fromFile = findInStorage(key); + if (fromFile == null) { + return null; + } + E previousValue = dao.putIfAbsent(key, fromFile); + if (previousValue != null) { + // If new value was putted while we were looking for in storage, just return it + // Maybe should return previousValue, as value which was stored when method called + // But this is concurrency, there are no guarantees + return previousValue; + } + return fromFile; } @Override @@ -44,6 +97,7 @@ public Iterator allFrom(D from) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param from NotNull lower bound of range (inclusive) * @return entries with key >= from */ @@ -58,6 +112,7 @@ public Iterator allTo(D to) { /** * Doesn't check the argument for null. Should be called only if there was a check before + * * @param to NotNull upper bound of range (exclusive) * @return entries with key < to */ @@ -69,4 +124,197 @@ private Iterator allToUnsafe(D to) { public Iterator all() { return dao.values().iterator(); } + + @Override + public synchronized void flush() throws IOException { + long[] offsets = writeEntries(); + writeOffsets(offsets); + } + + @Override + public synchronized void close() throws IOException { + if (!closed) { + flush(); + closed = true; + } + } + + // =================================== + // Reading values + // =================================== + + private D readValue(MemorySegment memorySegment, long offset) { + long size = memorySegment.get(ValueLayout.JAVA_LONG_UNALIGNED, offset); + MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); + return serializer.toValue(valueSegment); + } + + // Return new offset + private long writeValue(D value, MemorySegment memorySegment, long offset) { + MemorySegment valueSegment = serializer.fromValue(value); + long size = valueSegment.byteSize(); + + memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, size); + MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + return offset + SIZE_LENGTH + size; + } + + // =================================== + // Reading offsets and data + // =================================== + + /** + * Read offsets from file.
+ * If the file doesn't exist, storageOffsets will be filled with an empty array and a warning message will be printed.
    + * After completion of work storageOffsets not null. + */ + private synchronized void readOffsets() { + if (storageOffsets != null) { + return; + } + File offsetsFile = basePath.resolve(OFFSETS_FILENAME).toFile(); + if (!offsetsFile.exists()) { + // Предположим что тут и далее нормальный логгер, и мы вызываем log.warning(), log.error() вместо этого + System.out.println( + "[WARN] Previous saved data in path: " + offsetsFile.getPath() + " didn't found." + + "It's ok if this storage launches first time or didn't save data before" + ); + this.storageOffsets = new long[] {}; + return; + } + + try (RandomAccessFile file = new RandomAccessFile(offsetsFile, "r"); + FileChannel channel = file.getChannel(); + Arena arena = Arena.ofConfined()) { + + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + this.storageOffsets = fileSegment.toArray(ValueLayout.OfLong.JAVA_LONG); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private E findInStorage(D key) { + if (storageOffsets == null) { + readOffsets(); + } + if (storageOffsets.length == 0) { + return null; + } + + File databaseFile = basePath.resolve(DB_FILENAME).toFile(); + if (!databaseFile.exists()) { + System.out.println( + "[ERROR] Previous saved data in path: " + databaseFile.getPath() + " didn't found, " + + "but offsets file exist." + ); + return null; + } + + try (RandomAccessFile file = new RandomAccessFile(databaseFile, "r"); + FileChannel channel = file.getChannel()) { + Arena arena = Arena.ofAuto(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); + + // binary search + D foundedKey = null; + int leftOffsetIndex = -1; // offset of element <= key + int rightOffsetIndex = storageOffsets.length; // offset of element > key + + while (leftOffsetIndex + 1 < rightOffsetIndex) { + int middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; + long middleOffset = storageOffsets[middleOffsetIndex]; + + D currentKey = readValue(fileSegment, middleOffset); + int compared = comparator.compare(currentKey, key); + + if (compared < 0) { + leftOffsetIndex = middleOffsetIndex; + } else if (compared > 0) { + rightOffsetIndex = middleOffsetIndex; + } else { + leftOffsetIndex = middleOffsetIndex; + foundedKey = currentKey; + break; + } + } + if (foundedKey == null) { + // not found, element at leftOffset < key + return null; + } + + long valueOffset = storageOffsets[leftOffsetIndex] + SIZE_LENGTH + serializer.size(foundedKey); + D value = readValue(fileSegment, valueOffset); + + return serializer.createEntry(foundedKey, value); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + // =================================== + // Writing offsets and data + // =================================== + + // Return nullable offsets + + private long[] writeEntries() throws IOException { + Path resultPath = basePath.resolve(DB_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = calculateInMemoryDAOSize(); + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + + int i = 0; + long offset = 0; + long[] offsets = new long[dao.size()]; + for (E entry : dao.values()) { + offsets[i++] = offset; + offset = writeValue(entry.key(), fileSegment, offset); + offset = 
writeValue(entry.value(), fileSegment, offset); + } + fileSegment.load(); + return offsets; + } + } + + private void writeOffsets(long[] offsets) throws IOException { + Path resultPath = basePath.resolve(OFFSETS_FILENAME); + + try (FileChannel channel = FileChannel.open( + resultPath, + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE); + Arena arena = Arena.ofConfined()) { + + long size = (long) offsets.length * SIZE_LENGTH; + MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + MemorySegment offsetsSegment = MemorySegment.ofArray(offsets); + MemorySegment.copy(offsetsSegment, 0, fileSegment, 0, size); + fileSegment.load(); + } + } + + // =================================== + // Some util methods + // =================================== + + private long calculateInMemoryDAOSize() { + long size = 0; + for (E entry : dao.values()) { + size += 2 * SIZE_LENGTH; + size += serializer.size(entry.key()); + size += serializer.size(entry.value()); + } + return size; + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index e573bb058..62895a614 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -1,5 +1,7 @@ package ru.vk.itmo.kovalchukvladislav; +import ru.vk.itmo.BaseEntry; +import ru.vk.itmo.Config; import ru.vk.itmo.Entry; import java.lang.foreign.MemorySegment; @@ -7,11 +9,12 @@ import java.util.Comparator; public class MemorySegmentDao extends AbstractInMemoryDao> { - private static final Comparator COMPARATOR = getComparator(); private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + private static final Serializer MEMORY_SEGMENT_SERIALIZER = new Serializer(); + private static final Comparator COMPARATOR = getComparator(); - public MemorySegmentDao() { - super(COMPARATOR); + public MemorySegmentDao(Config config) { + super(config, COMPARATOR, MEMORY_SEGMENT_SERIALIZER); } private static Comparator getComparator() { @@ -30,5 +33,27 @@ private static Comparator getComparator() { return Byte.compare(byteA, byteB); }; } + + private static class Serializer implements MemorySegmentSerializer> { + @Override + public MemorySegment toValue(MemorySegment input) { + return input; + } + + @Override + public MemorySegment fromValue(MemorySegment value) { + return value; + } + + @Override + public long size(MemorySegment value) { + return value.byteSize(); + } + + @Override + public Entry createEntry(MemorySegment key, MemorySegment value) { + return new BaseEntry<>(key, value); + } + } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java new file mode 100644 index 000000000..bb947ebed --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java @@ -0,0 +1,14 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.Entry; +import java.lang.foreign.MemorySegment; + +public interface MemorySegmentSerializer> { + D toValue(MemorySegment input); + + MemorySegment fromValue(D value); + + long size(D value); + + E createEntry(D key, D value); +} diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 
dd8963f94..6657c68aa 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -1,23 +1,26 @@ package ru.vk.itmo.test.kovalchukvladislav; +import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.MemorySegmentDao; import ru.vk.itmo.test.DaoFactory; +import java.io.IOException; import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory +@DaoFactory(stage = 2) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + @Override - public Dao> createDao() { - return new MemorySegmentDao(); + public Dao> createDao(Config config) throws IOException { + return new MemorySegmentDao(config); } @Override From 1a2797f53b3ea3e1461b88a42ab80dc1677ef3bf Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 5 Oct 2023 07:31:48 +0300 Subject: [PATCH 12/36] codeclimate --- .../AbstractInMemoryDao.java | 136 ++++++++++-------- .../MemorySegmentDaoFactory.java | 1 - 2 files changed, 80 insertions(+), 57 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 51c67767e..970891e73 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -12,12 +12,15 @@ import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.logging.Logger; /** * Saves state (two files) when closed in config.basePath()
    directory. @@ -30,19 +33,21 @@ * offsets with keys offsets at db(in bytes) * * - *

    This allows search entries using a binary search after db reading (usually less due to missing values) + *

    This allows use binary search after db reading (usually file much less) */ public abstract class AbstractInMemoryDao> implements Dao { private static final String DB_FILENAME = "db"; private static final int SIZE_LENGTH = Long.BYTES; private static final String OFFSETS_FILENAME = "offsets"; + private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; private final Path basePath; - private volatile boolean closed = false; - private volatile long[] storageOffsets = null; + private volatile boolean closed; + private volatile MemorySegment storageOffsets; private final Comparator comparator; private final ConcurrentNavigableMap dao; private final MemorySegmentSerializer serializer; + private final Logger logger = Logger.getLogger("InMemoryDao"); protected AbstractInMemoryDao(Config config, Comparator comparator, @@ -127,8 +132,8 @@ public Iterator all() { @Override public synchronized void flush() throws IOException { - long[] offsets = writeEntries(); - writeOffsets(offsets); + writeData(); + this.storageOffsets = null; } @Override @@ -144,7 +149,7 @@ public synchronized void close() throws IOException { // =================================== private D readValue(MemorySegment memorySegment, long offset) { - long size = memorySegment.get(ValueLayout.JAVA_LONG_UNALIGNED, offset); + long size = memorySegment.get(LONG_LAYOUT, offset); MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); return serializer.toValue(valueSegment); } @@ -154,7 +159,7 @@ private long writeValue(D value, MemorySegment memorySegment, long offset) { MemorySegment valueSegment = serializer.fromValue(value); long size = valueSegment.byteSize(); - memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, size); + memorySegment.set(LONG_LAYOUT, offset, size); MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); return offset + SIZE_LENGTH + size; } @@ -165,8 +170,7 @@ private long writeValue(D value, MemorySegment memorySegment, long offset) { /** * Read offsets from file.
    - * If file doesn't exist will be filled with empty array and output a warning message.
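The db/offsets layout described in the class javadoc above is what makes this lookup possible: the offsets file is simply an array of long positions into the key-sorted db file, so a key can be located with a binary search over the mapped offsets segment. A minimal sketch of that idea (illustrative only, not code from this patch; readKeyAt and compareKeys stand in for the patch's serializer and comparator):

    // Sketch: exact-match binary search over a memory-mapped offsets file.
    // Assumes: import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout;
    // readKeyAt(db, pos) decodes the length-prefixed key stored at pos,
    // compareKeys(a, b) orders keys the same way the DAO's comparator does.
    static long findEntryOffset(MemorySegment db, MemorySegment offsets, MemorySegment key) {
        long count = offsets.byteSize() / Long.BYTES;
        long low = 0;
        long high = count - 1;
        while (low <= high) {
            long mid = (low + high) >>> 1;
            long entryOffset = offsets.getAtIndex(ValueLayout.JAVA_LONG_UNALIGNED, mid);
            int cmp = compareKeys(readKeyAt(db, entryOffset), key);
            if (cmp < 0) {
                low = mid + 1;      // probe key is smaller than the target, go right
            } else if (cmp > 0) {
                high = mid - 1;     // probe key is larger than the target, go left
            } else {
                return entryOffset; // exact match: the entry starts here in the db file
            }
        }
        return -1;                  // key is absent from this file pair
    }

The patch itself uses lower/upper-bound variants of the same loop so that absent keys and, in later patches, range scans are handled as well.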
    - * After completion of work storageOffsets not null. + * If file doesn't exist will be MemorySegment.NULL */ private synchronized void readOffsets() { if (storageOffsets != null) { @@ -174,39 +178,38 @@ private synchronized void readOffsets() { } File offsetsFile = basePath.resolve(OFFSETS_FILENAME).toFile(); if (!offsetsFile.exists()) { - // Предположим что тут и далее нормальный логгер, и мы вызываем log.warning(), log.error() вместо этого - System.out.println( - "[WARN] Previous saved data in path: " + offsetsFile.getPath() + " didn't found." - + "It's ok if this storage launches first time or didn't save data before" + logger.warning(() -> + "Previous saved data in path: " + offsetsFile.getPath() + " didn't found." + + "It's ok if this storage launches first time or didn't save data before" ); - this.storageOffsets = new long[] {}; + this.storageOffsets = MemorySegment.NULL; return; } try (RandomAccessFile file = new RandomAccessFile(offsetsFile, "r"); - FileChannel channel = file.getChannel(); - Arena arena = Arena.ofConfined()) { + FileChannel channel = file.getChannel()) { - MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); - this.storageOffsets = fileSegment.toArray(ValueLayout.OfLong.JAVA_LONG); + this.storageOffsets = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), Arena.ofAuto()); } catch (IOException e) { throw new UncheckedIOException(e); } } private E findInStorage(D key) { + // null means that wasn't read if (storageOffsets == null) { readOffsets(); } - if (storageOffsets.length == 0) { + // MemorySegment.NULL means that file doesn't exist (ex. first launch, no need trying to read again + if (storageOffsets == MemorySegment.NULL) { return null; } File databaseFile = basePath.resolve(DB_FILENAME).toFile(); if (!databaseFile.exists()) { - System.out.println( - "[ERROR] Previous saved data in path: " + databaseFile.getPath() + " didn't found, " - + "but offsets file exist." + logger.severe(() -> + "Previous saved data in path: " + databaseFile.getPath() + + " didn't found, + but offsets file exist." 
); return null; } @@ -215,15 +218,15 @@ private E findInStorage(D key) { FileChannel channel = file.getChannel()) { Arena arena = Arena.ofAuto(); MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); - + // binary search D foundedKey = null; - int leftOffsetIndex = -1; // offset of element <= key - int rightOffsetIndex = storageOffsets.length; // offset of element > key + long leftOffsetIndex = -1; // offset of element <= key + long rightOffsetIndex = storageOffsets.byteSize() / Long.BYTES; // offset of element > key while (leftOffsetIndex + 1 < rightOffsetIndex) { - int middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; - long middleOffset = storageOffsets[middleOffsetIndex]; + long middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; + long middleOffset = storageOffsets.getAtIndex(LONG_LAYOUT, middleOffsetIndex); D currentKey = readValue(fileSegment, middleOffset); int compared = comparator.compare(currentKey, key); @@ -243,7 +246,9 @@ private E findInStorage(D key) { return null; } - long valueOffset = storageOffsets[leftOffsetIndex] + SIZE_LENGTH + serializer.size(foundedKey); + long valueOffset = storageOffsets.getAtIndex(LONG_LAYOUT, leftOffsetIndex); + valueOffset += SIZE_LENGTH; + valueOffset += serializer.size(foundedKey); D value = readValue(fileSegment, valueOffset); return serializer.createEntry(foundedKey, value); @@ -256,51 +261,68 @@ private E findInStorage(D key) { // Writing offsets and data // =================================== - // Return nullable offsets + private void writeData() throws IOException { + // Merge data from disk with memory DAO and write updated file + addDataFromStorage(); - private long[] writeEntries() throws IOException { - Path resultPath = basePath.resolve(DB_FILENAME); + Path dbPath = basePath.resolve(DB_FILENAME); + Path offsetsPath = basePath.resolve(OFFSETS_FILENAME); - try (FileChannel channel = FileChannel.open( - resultPath, + OpenOption[] options = new OpenOption[] { StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.CREATE); + StandardOpenOption.CREATE + }; + + try (FileChannel db = FileChannel.open(dbPath, options); + FileChannel offsets = FileChannel.open(offsetsPath, options); Arena arena = Arena.ofConfined()) { - long size = calculateInMemoryDAOSize(); - MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); + long dbSize = getDAOBytesSize(); + long offsetsSize = (long) dao.size() * Long.BYTES; + MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, arena); + MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, arena); int i = 0; long offset = 0; - long[] offsets = new long[dao.size()]; for (E entry : dao.values()) { - offsets[i++] = offset; + offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); + i += 1; + offset = writeValue(entry.key(), fileSegment, offset); offset = writeValue(entry.value(), fileSegment, offset); } fileSegment.load(); - return offsets; + offsetsSegment.load(); } } - private void writeOffsets(long[] offsets) throws IOException { - Path resultPath = basePath.resolve(OFFSETS_FILENAME); + private void addDataFromStorage() throws IOException { + Path dbPath = basePath.resolve(DB_FILENAME); + if (!Files.exists(dbPath)) { + return; + } - try (FileChannel channel = FileChannel.open( - resultPath, - StandardOpenOption.READ, - StandardOpenOption.WRITE, - 
StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.CREATE); + try (FileChannel db = FileChannel.open(dbPath, StandardOpenOption.READ); Arena arena = Arena.ofConfined()) { - long size = (long) offsets.length * SIZE_LENGTH; - MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_WRITE, 0, size, arena); - MemorySegment offsetsSegment = MemorySegment.ofArray(offsets); - MemorySegment.copy(offsetsSegment, 0, fileSegment, 0, size); - fileSegment.load(); + long dbSize = db.size(); + MemorySegment dbMemorySegment = db.map(FileChannel.MapMode.READ_ONLY, 0, dbSize, arena); + + long offset = 0; + while (offset < dbSize) { + D key = readValue(dbMemorySegment, offset); + offset += SIZE_LENGTH; + offset += serializer.size(key); + + D value = readValue(dbMemorySegment, offset); + offset += SIZE_LENGTH; + offset += serializer.size(value); + + E entry = serializer.createEntry(key, value); + dao.putIfAbsent(key, entry); + } } } @@ -308,13 +330,15 @@ private void writeOffsets(long[] offsets) throws IOException { // Some util methods // =================================== - private long calculateInMemoryDAOSize() { + private long getDAOBytesSize() { long size = 0; for (E entry : dao.values()) { - size += 2 * SIZE_LENGTH; - size += serializer.size(entry.key()); - size += serializer.size(entry.value()); + size += getEntryBytesSize(entry); } return size; } + + private long getEntryBytesSize(E entry) { + return 2 * SIZE_LENGTH + serializer.size(entry.key()) + serializer.size(entry.value()); + } } diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 6657c68aa..6333021bf 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -17,7 +17,6 @@ public class MemorySegmentDaoFactory implements DaoFactory.Factory> createDao(Config config) throws IOException { return new MemorySegmentDao(config); From 0c4aa95459d354143ef680a62160a6c145e6508d Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 18 Oct 2023 23:53:58 +0300 Subject: [PATCH 13/36] codeclimate --- .../AbstractInMemoryDao.java | 271 ++++++++---------- .../MemorySegmentComparator.java | 26 ++ .../kovalchukvladislav/MemorySegmentDao.java | 29 +- .../kovalchukvladislav/model/DaoEntry.java | 9 + .../kovalchukvladislav/model/DaoStorage.java | 9 + .../model/InMemoryDaoStorage.java | 33 +++ 6 files changed, 199 insertions(+), 178 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index 970891e73..c3311f653 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -4,10 +4,7 @@ import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; -import java.io.File; import java.io.IOException; -import java.io.RandomAccessFile; -import java.io.UncheckedIOException; import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; import 
java.lang.foreign.ValueLayout; @@ -18,14 +15,14 @@ import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; +import java.util.Objects; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.logging.Logger; /** * Saves state (two files) when closed in config.basePath()
    directory. * - *

    Directory contains two files: + *

    State contains two files: *

  • * db with sorted by key entries *
  • @@ -33,29 +30,68 @@ * offsets with keys offsets at db(in bytes) * * - *

    This allows use binary search after db reading (usually file much less) + *

    This allows use binary search after db reading */ public abstract class AbstractInMemoryDao> implements Dao { - private static final String DB_FILENAME = "db"; - private static final int SIZE_LENGTH = Long.BYTES; - private static final String OFFSETS_FILENAME = "offsets"; private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; + private static final String DB_FILENAME_PREFIX = "db_"; + private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; + private static final String METADATA_FILENAME = "metadata"; + private static final int SIZE_LENGTH = Long.BYTES; private final Path basePath; - private volatile boolean closed; - private volatile MemorySegment storageOffsets; + private final Path metadataPath; + private final Arena arena = Arena.ofShared(); private final Comparator comparator; private final ConcurrentNavigableMap dao; private final MemorySegmentSerializer serializer; - private final Logger logger = Logger.getLogger("InMemoryDao"); + + private final int storagesCount; + private final FileChannel[] dbFileChannels; + private final FileChannel[] offsetChannels; + private final MemorySegment[] dbMappedSegments; + private final MemorySegment[] offsetMappedSegments; protected AbstractInMemoryDao(Config config, Comparator comparator, - MemorySegmentSerializer serializer) { + MemorySegmentSerializer serializer) throws IOException { this.comparator = comparator; this.serializer = serializer; this.dao = new ConcurrentSkipListMap<>(comparator); - this.basePath = config.basePath(); + this.basePath = Objects.requireNonNull(config.basePath()); + Files.createDirectories(basePath); + this.metadataPath = basePath.resolve(METADATA_FILENAME); + + this.storagesCount = getCountFromMetadataOrCreate(); + this.dbFileChannels = new FileChannel[storagesCount]; + this.offsetChannels = new FileChannel[storagesCount]; + this.dbMappedSegments = new MemorySegment[storagesCount]; + this.offsetMappedSegments = new MemorySegment[storagesCount]; + + for (int i = 0; i < storagesCount; i++) { + readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbFileChannels, dbMappedSegments); + readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetChannels, offsetMappedSegments); + } + } + + private int getCountFromMetadataOrCreate() throws IOException { + if (!Files.exists(metadataPath)) { + Files.writeString(metadataPath, "0", StandardOpenOption.WRITE, StandardOpenOption.CREATE); + return 0; + } + return Integer.parseInt(Files.readString(metadataPath)); + } + + private void readFileAndMapToSegment(String filenamePrefix, int index, + FileChannel[] dstChannel, + MemorySegment[] dstSegment) throws IOException { + Path path = basePath.resolve(filenamePrefix + index); + + FileChannel fileChannel = FileChannel.open(path, StandardOpenOption.READ); + MemorySegment mappedSegment = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); + + dstChannel[index] = fileChannel; + dstSegment[index] = mappedSegment; } @Override @@ -63,7 +99,7 @@ public Iterator get(D from, D to) { if (from == null && to == null) { return all(); } else if (from == null) { - return allToUnsafe(to); + return allTo(to); } else if (to == null) { return allFromUnsafe(from); } @@ -74,20 +110,15 @@ public Iterator get(D from, D to) { public E get(D key) { E e = dao.get(key); if (e != null) { - return e; + return e.value() == null ? 
null : e; } - E fromFile = findInStorage(key); - if (fromFile == null) { - return null; - } - E previousValue = dao.putIfAbsent(key, fromFile); - if (previousValue != null) { - // If new value was putted while we were looking for in storage, just return it - // Maybe should return previousValue, as value which was stored when method called - // But this is concurrency, there are no guarantees - return previousValue; + for (int i = storagesCount - 1; i >= 0; i--) { + E fromFile = findInStorage(key, i); + if (fromFile != null) { + return fromFile.value() == null ? null : fromFile; + } } - return fromFile; + return null; } @Override @@ -132,15 +163,27 @@ public Iterator all() { @Override public synchronized void flush() throws IOException { - writeData(); - this.storageOffsets = null; + if (!dao.isEmpty()) { + writeData(); + Files.writeString(metadataPath, String.valueOf(storagesCount + 1)); + } } @Override public synchronized void close() throws IOException { - if (!closed) { - flush(); - closed = true; + if (arena.scope().isAlive()) { + arena.close(); + } + flush(); + closeChannels(dbFileChannels); + closeChannels(offsetChannels); + } + + private void closeChannels(FileChannel[] channels) throws IOException { + for (FileChannel channel : channels) { + if (channel.isOpen()) { + channel.close(); + } } } @@ -150,6 +193,9 @@ public synchronized void close() throws IOException { private D readValue(MemorySegment memorySegment, long offset) { long size = memorySegment.get(LONG_LAYOUT, offset); + if (size == 0) { + return null; + } MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); return serializer.toValue(valueSegment); } @@ -158,103 +204,53 @@ private D readValue(MemorySegment memorySegment, long offset) { private long writeValue(D value, MemorySegment memorySegment, long offset) { MemorySegment valueSegment = serializer.fromValue(value); long size = valueSegment.byteSize(); - memorySegment.set(LONG_LAYOUT, offset, size); - MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + if (size != 0) { + MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + } return offset + SIZE_LENGTH + size; } - // =================================== - // Reading offsets and data - // =================================== - - /** - * Read offsets from file.
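From this patch on, every flush produces a brand-new pair of files, db_N and offsets_N, and then rewrites the metadata file with the incremented table count; the constructor reads that count back on startup and maps every existing pair. A condensed sketch of just that convention (hypothetical helper, not the patch's code; error handling and the actual entry writing are omitted):

    // Sketch of the numbered-table convention used by the constructor and flush().
    // Assumes: import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path;
    static int commitNewTable(Path basePath) throws IOException {
        Path metadata = basePath.resolve("metadata");
        int count = Files.exists(metadata) ? Integer.parseInt(Files.readString(metadata)) : 0;
        Path nextDb = basePath.resolve("db_" + count);           // entries sorted by key
        Path nextOffsets = basePath.resolve("offsets_" + count); // positions of entries in db_<count>
        // ... write the in-memory map into nextDb and nextOffsets here ...
        Files.writeString(metadata, String.valueOf(count + 1));  // publish the new table count
        return count + 1;
    }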
    - * If file doesn't exist will be MemorySegment.NULL - */ - private synchronized void readOffsets() { - if (storageOffsets != null) { - return; - } - File offsetsFile = basePath.resolve(OFFSETS_FILENAME).toFile(); - if (!offsetsFile.exists()) { - logger.warning(() -> - "Previous saved data in path: " + offsetsFile.getPath() + " didn't found." - + "It's ok if this storage launches first time or didn't save data before" - ); - this.storageOffsets = MemorySegment.NULL; - return; - } - - try (RandomAccessFile file = new RandomAccessFile(offsetsFile, "r"); - FileChannel channel = file.getChannel()) { - - this.storageOffsets = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), Arena.ofAuto()); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } + private E findInStorage(D key, int index) { + MemorySegment storage = dbMappedSegments[index]; + MemorySegment offsets = offsetMappedSegments[index]; - private E findInStorage(D key) { - // null means that wasn't read - if (storageOffsets == null) { - readOffsets(); - } - // MemorySegment.NULL means that file doesn't exist (ex. first launch, no need trying to read again - if (storageOffsets == MemorySegment.NULL) { + long upperBoundOffset = findUpperBoundOffset(key, storage, offsets); + if (upperBoundOffset == -1) { return null; } - - File databaseFile = basePath.resolve(DB_FILENAME).toFile(); - if (!databaseFile.exists()) { - logger.severe(() -> - "Previous saved data in path: " + databaseFile.getPath() - + " didn't found, + but offsets file exist." - ); - return null; + D upperBoundKey = readValue(storage, upperBoundOffset); + if (comparator.compare(upperBoundKey, key) == 0) { + D value = readValue(storage, upperBoundOffset + SIZE_LENGTH + serializer.size(upperBoundKey)); + return serializer.createEntry(upperBoundKey, value); } + return null; + } - try (RandomAccessFile file = new RandomAccessFile(databaseFile, "r"); - FileChannel channel = file.getChannel()) { - Arena arena = Arena.ofAuto(); - MemorySegment fileSegment = channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size(), arena); - - // binary search - D foundedKey = null; - long leftOffsetIndex = -1; // offset of element <= key - long rightOffsetIndex = storageOffsets.byteSize() / Long.BYTES; // offset of element > key - - while (leftOffsetIndex + 1 < rightOffsetIndex) { - long middleOffsetIndex = leftOffsetIndex + (rightOffsetIndex - leftOffsetIndex) / 2; - long middleOffset = storageOffsets.getAtIndex(LONG_LAYOUT, middleOffsetIndex); - - D currentKey = readValue(fileSegment, middleOffset); - int compared = comparator.compare(currentKey, key); - - if (compared < 0) { - leftOffsetIndex = middleOffsetIndex; - } else if (compared > 0) { - rightOffsetIndex = middleOffsetIndex; - } else { - leftOffsetIndex = middleOffsetIndex; - foundedKey = currentKey; - break; - } - } - if (foundedKey == null) { - // not found, element at leftOffset < key - return null; + /** + * Returns offset that storage.get(LONG_LAYOUT, offset).key() >= key
    + * -1 otherwise + */ + private long findUpperBoundOffset(D key, MemorySegment storage, MemorySegment offsets) { + long entriesCount = offsets.byteSize() / SIZE_LENGTH; + long left = -1; + long right = entriesCount; + + while (left + 1 < right) { + long middle = left + (right - left) / 2; + long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); + D middleKey = readValue(storage, middleOffset); + + if (comparator.compare(middleKey, key) < 0) { + left = middle; + } else { + right = middle; } - - long valueOffset = storageOffsets.getAtIndex(LONG_LAYOUT, leftOffsetIndex); - valueOffset += SIZE_LENGTH; - valueOffset += serializer.size(foundedKey); - D value = readValue(fileSegment, valueOffset); - - return serializer.createEntry(foundedKey, value); - } catch (IOException e) { - throw new UncheckedIOException(e); } + if (right == entriesCount) { + return -1; + } + return offsets.getAtIndex(LONG_LAYOUT, right); } // =================================== @@ -262,11 +258,8 @@ private E findInStorage(D key) { // =================================== private void writeData() throws IOException { - // Merge data from disk with memory DAO and write updated file - addDataFromStorage(); - - Path dbPath = basePath.resolve(DB_FILENAME); - Path offsetsPath = basePath.resolve(OFFSETS_FILENAME); + Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); + Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); OpenOption[] options = new OpenOption[] { StandardOpenOption.READ, @@ -298,38 +291,6 @@ private void writeData() throws IOException { } } - private void addDataFromStorage() throws IOException { - Path dbPath = basePath.resolve(DB_FILENAME); - if (!Files.exists(dbPath)) { - return; - } - - try (FileChannel db = FileChannel.open(dbPath, StandardOpenOption.READ); - Arena arena = Arena.ofConfined()) { - - long dbSize = db.size(); - MemorySegment dbMemorySegment = db.map(FileChannel.MapMode.READ_ONLY, 0, dbSize, arena); - - long offset = 0; - while (offset < dbSize) { - D key = readValue(dbMemorySegment, offset); - offset += SIZE_LENGTH; - offset += serializer.size(key); - - D value = readValue(dbMemorySegment, offset); - offset += SIZE_LENGTH; - offset += serializer.size(value); - - E entry = serializer.createEntry(key, value); - dao.putIfAbsent(key, entry); - } - } - } - - // =================================== - // Some util methods - // =================================== - private long getDAOBytesSize() { long size = 0; for (E entry : dao.values()) { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java new file mode 100644 index 000000000..d5fc40e4a --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java @@ -0,0 +1,26 @@ +package ru.vk.itmo.kovalchukvladislav; + +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.util.Comparator; + +public class MemorySegmentComparator implements Comparator { + public static final MemorySegmentComparator INSTANCE = new MemorySegmentComparator(); + private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; + + @Override + public int compare(MemorySegment a, MemorySegment b) { + long diffIndex = a.mismatch(b); + if (diffIndex == -1) { + return 0; + } else if (diffIndex == a.byteSize()) { + return -1; + } else if (diffIndex == b.byteSize()) { + return 1; + } + + byte byteA = a.getAtIndex(VALUE_LAYOUT, diffIndex); + byte byteB = 
b.getAtIndex(VALUE_LAYOUT, diffIndex); + return Byte.compare(byteA, byteB); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index 62895a614..9a7fd8372 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -4,34 +4,14 @@ import ru.vk.itmo.Config; import ru.vk.itmo.Entry; +import java.io.IOException; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; -import java.util.Comparator; public class MemorySegmentDao extends AbstractInMemoryDao> { - private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; private static final Serializer MEMORY_SEGMENT_SERIALIZER = new Serializer(); - private static final Comparator COMPARATOR = getComparator(); - public MemorySegmentDao(Config config) { - super(config, COMPARATOR, MEMORY_SEGMENT_SERIALIZER); - } - - private static Comparator getComparator() { - return (Comparator) (a, b) -> { - long diffIndex = a.mismatch(b); - if (diffIndex == -1) { - return 0; - } else if (diffIndex == a.byteSize()) { - return -1; - } else if (diffIndex == b.byteSize()) { - return 1; - } - - byte byteA = a.getAtIndex(VALUE_LAYOUT, diffIndex); - byte byteB = b.getAtIndex(VALUE_LAYOUT, diffIndex); - return Byte.compare(byteA, byteB); - }; + public MemorySegmentDao(Config config) throws IOException { + super(config, MemorySegmentComparator.INSTANCE, MEMORY_SEGMENT_SERIALIZER); } private static class Serializer implements MemorySegmentSerializer> { @@ -47,6 +27,9 @@ public MemorySegment fromValue(MemorySegment value) { @Override public long size(MemorySegment value) { + if (value == null) { + return 0; + } return value.byteSize(); } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java new file mode 100644 index 000000000..935cc6cff --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java @@ -0,0 +1,9 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; + +public interface DaoEntry> extends Comparable> { + E getEntry(); + + DaoStorage storage(); +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java new file mode 100644 index 000000000..783b92033 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java @@ -0,0 +1,9 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; + +public interface DaoStorage> { + DaoEntry currentEntry(); + + DaoEntry nextEntry(); +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java new file mode 100644 index 000000000..54ea19a04 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java @@ -0,0 +1,33 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; +import java.util.Comparator; +import java.util.Iterator; +import java.util.NavigableMap; + +public class InMemoryDaoStorage> implements DaoStorage { + private final Iterator iterator; + private InMemoryDaoEntry currentEntry; + private final Comparator comparator; + + public InMemoryDaoStorage(NavigableMap map, Comparator comparator) { + this.iterator = map.values().iterator(); + this.comparator = comparator; + 
this.currentEntry = new InMemoryDaoEntry<>(iterator.next(), this, comparator); + } + + @Override + public DaoEntry currentEntry() { + return currentEntry; + } + + @Override + public DaoEntry nextEntry() { + if (!iterator.hasNext()) { + return null; + } + InMemoryDaoEntry nextEntry = new InMemoryDaoEntry<>(iterator.next(), this, comparator); + currentEntry = nextEntry; + return nextEntry; + } +} From b5c482dd62a8bab0c71348c046188d2d1dae53eb Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 18 Oct 2023 23:54:07 +0300 Subject: [PATCH 14/36] codeclimate --- .../model/InMemoryDaoEntry.java | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java new file mode 100644 index 000000000..fce02fe1d --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java @@ -0,0 +1,31 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; +import java.util.Comparator; + +public class InMemoryDaoEntry> implements DaoEntry { + private final E currentEntry; + private final InMemoryDaoStorage storage; + private final Comparator comparator; + + public InMemoryDaoEntry(E entry, InMemoryDaoStorage storage, Comparator comparator) { + this.currentEntry = entry; + this.storage = storage; + this.comparator = comparator; + } + + @Override + public E getEntry() { + return currentEntry; + } + + @Override + public DaoStorage storage() { + return storage; + } + + @Override + public int compareTo(DaoEntry other) { + return comparator.compare(currentEntry.key(), other.getEntry().key()); + } +} From 82c863855cb24f8326723ea25d4ee9b9e47000d9 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 18 Oct 2023 23:58:00 +0300 Subject: [PATCH 15/36] msdf --- .../AbstractBasedOnSSTableDao.java | 441 ++++++++++++++++++ .../AbstractInMemoryDao.java | 285 +---------- .../IdentityMemorySegmentSerializer.java | 29 ++ .../kovalchukvladislav/MemorySegmentDao.java | 32 +- .../kovalchukvladislav/model/DaoEntry.java | 9 - .../kovalchukvladislav/model/DaoStorage.java | 9 - .../model/InMemoryDaoEntry.java | 31 -- .../model/InMemoryDaoStorage.java | 33 -- .../MemorySegmentDaoFactory.java | 2 +- 9 files changed, 485 insertions(+), 386 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java new file mode 100644 index 000000000..fa8ca9d91 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -0,0 +1,441 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.Config; +import ru.vk.itmo.Entry; + +import java.io.IOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; 
+import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.PriorityQueue; + +public abstract class AbstractBasedOnSSTableDao> extends AbstractInMemoryDao { + // =================================== + // Constants + // =================================== + private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; + private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; + private static final String METADATA_FILENAME = "metadata"; + private static final String DB_FILENAME_PREFIX = "db_"; + private static final int SIZE_LENGTH = Long.BYTES; + private static final long VALUE_IS_NULL_SIZE = -1; + + // =================================== + // Variables + // =================================== + + private final Path basePath; + private final Path metadataPath; + private final Arena arena = Arena.ofShared(); + private final MemorySegmentSerializer serializer; + + // =================================== + // Storages + // =================================== + + private final int storagesCount; + private final List dbFileChannels; + private final List offsetChannels; + private final List dbMappedSegments; + private final List offsetMappedSegments; + + protected AbstractBasedOnSSTableDao(Config config, + Comparator comparator, + MemorySegmentSerializer serializer) throws IOException { + super(comparator); + this.serializer = serializer; + this.basePath = Objects.requireNonNull(config.basePath()); + + if (!Files.exists(basePath)) { + Files.createDirectory(basePath); + } + this.metadataPath = basePath.resolve(METADATA_FILENAME); + + this.storagesCount = getCountFromMetadataOrCreate(); + this.dbFileChannels = new ArrayList<>(storagesCount); + this.offsetChannels = new ArrayList<>(storagesCount); + this.dbMappedSegments = new ArrayList<>(storagesCount); + this.offsetMappedSegments = new ArrayList<>(storagesCount); + + for (int i = 0; i < storagesCount; i++) { + readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbFileChannels, dbMappedSegments); + readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetChannels, offsetMappedSegments); + } + } + + // =================================== + // Restoring state + // =================================== + private int getCountFromMetadataOrCreate() throws IOException { + if (!Files.exists(metadataPath)) { + Files.writeString(metadataPath, "0", StandardOpenOption.WRITE, StandardOpenOption.CREATE); + return 0; + } + return Integer.parseInt(Files.readString(metadataPath)); + } + + private void readFileAndMapToSegment(String filenamePrefix, int index, + List channels, + List segments) throws IOException { + Path path = basePath.resolve(filenamePrefix + index); + FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); + MemorySegment segment = channel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); + channels.add(channel); + segments.add(segment); + } + + // =================================== + // Finding in storage + // =================================== + @Override + public Iterator get(D from, D to) { + Iterator inMemotyIterator = super.get(from, to); + List storageIterators = new ArrayList<>(storagesCount); + for (int i = 0; i < storagesCount; i++) { + storageIterators.add(new 
StorageIterator(dbMappedSegments.get(i), offsetMappedSegments.get(i), from, to)); + } + return new DaoIterator(inMemotyIterator, storageIterators); + } + + @Override + public E get(D key) { + E e = dao.get(key); + if (e != null) { + return e.value() == null ? null : e; + } + E fromFile = findInStorages(key); + return (fromFile == null || fromFile.value() == null) ? null : fromFile; + } + + private E findInStorages(D key) { + for (int i = storagesCount - 1; i >= 0; i--) { + MemorySegment storage = dbMappedSegments.get(i); + MemorySegment offsets = offsetMappedSegments.get(i); + + long lowerBoundOffset = findLowerBoundOffset(key, storage, offsets); + if (lowerBoundOffset == -1) { + continue; + } + D lowerBoundKey = readValue(storage, lowerBoundOffset); + if (comparator.compare(lowerBoundKey, key) == 0) { + D value = readValue(storage, lowerBoundOffset + SIZE_LENGTH + serializer.size(lowerBoundKey)); + return serializer.createEntry(lowerBoundKey, value); + } + } + return null; + } + + /** + * Returns the greater offset that storage.get(LONG_LAYOUT, offset).key() <= key
    + * -1 otherwise + */ + private long findLowerBoundOffset(D key, MemorySegment storage, MemorySegment offsets) { + long entriesCount = offsets.byteSize() / SIZE_LENGTH; + long left = -1; + long right = entriesCount; + + while (left + 1 < right) { + long middle = left + (right - left) / 2; + long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); + D middleKey = readValue(storage, middleOffset); + + if (comparator.compare(middleKey, key) <= 0) { + left = middle; + } else { + right = middle; + } + } + return left == -1 ? -1 : offsets.getAtIndex(LONG_LAYOUT, left); + } + + // =================================== + // Reading values + // =================================== + + private D readValue(MemorySegment memorySegment, long offset) { + long size = memorySegment.get(LONG_LAYOUT, offset); + if (size == VALUE_IS_NULL_SIZE) { + return null; + } + MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); + return serializer.toValue(valueSegment); + } + + // Return new offset + private long writeValue(D value, MemorySegment memorySegment, long offset) { + MemorySegment valueSegment = serializer.fromValue(value); + if (valueSegment == null) { + memorySegment.set(LONG_LAYOUT, offset, VALUE_IS_NULL_SIZE); + return offset + SIZE_LENGTH; + } + long size = valueSegment.byteSize(); + memorySegment.set(LONG_LAYOUT, offset, size); + MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + return offset + SIZE_LENGTH + size; + } + + // =================================== + // Writing offsets and data + // =================================== + + private void writeData() throws IOException { + Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); + Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); + + OpenOption[] options = new OpenOption[] { + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE + }; + + try (FileChannel db = FileChannel.open(dbPath, options); + FileChannel offsets = FileChannel.open(offsetsPath, options); + Arena confined = Arena.ofConfined()) { + + long dbSize = getDAOBytesSize(); + long offsetsSize = (long) dao.size() * Long.BYTES; + MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, confined); + MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, confined); + + int i = 0; + long offset = 0; + for (E entry : dao.values()) { + offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); + i += 1; + + offset = writeValue(entry.key(), fileSegment, offset); + offset = writeValue(entry.value(), fileSegment, offset); + } + fileSegment.load(); + offsetsSegment.load(); + } + } + + private long getDAOBytesSize() { + long size = 0; + for (E entry : dao.values()) { + size += getEntryBytesSize(entry); + } + return size; + } + + private long getEntryBytesSize(E entry) { + return 2 * SIZE_LENGTH + serializer.size(entry.key()) + serializer.size(entry.value()); + } + + // =================================== + // Close and flush + // =================================== + + @Override + public synchronized void flush() throws IOException { + if (!dao.isEmpty()) { + writeData(); + Files.writeString(metadataPath, String.valueOf(storagesCount + 1)); + } + } + + @Override + public synchronized void close() throws IOException { + if (arena.scope().isAlive()) { + arena.close(); + } + flush(); + closeChannels(dbFileChannels); + closeChannels(offsetChannels); + } + + private void 
closeChannels(List channels) throws IOException { + for (FileChannel channel : channels) { + if (channel.isOpen()) { + channel.close(); + } + } + } + + // =================================== + // Iterators + // =================================== + + private class DaoIterator implements Iterator { + private static final Integer IN_MEMORY_ITERATOR_ID = Integer.MAX_VALUE; + private final Iterator inMemoryIterator; + private final List storageIterators; + private final PriorityQueue queue; + + public DaoIterator(Iterator inMemoryIterator, List storageIterators) { + this.inMemoryIterator = inMemoryIterator; + this.storageIterators = storageIterators; + this.queue = new PriorityQueue<>(1 + storageIterators.size()); + + addEntryByIteratorIdSafe(IN_MEMORY_ITERATOR_ID); + for (int i = 0; i < storageIterators.size(); i++) { + addEntryByIteratorIdSafe(i); + } + cleanByNull(); + } + + @Override + public boolean hasNext() { + return !queue.isEmpty(); + } + + @Override + public E next() { + if (queue.isEmpty()) { + throw new NoSuchElementException(); + } + IndexedEntry minElement = queue.peek(); + E minEntry = minElement.entry; + cleanByKey(minElement.entry.key()); + cleanByNull(); + return minEntry; + } + + private void cleanByKey(D key) { + while (!queue.isEmpty() && comparator.compare(queue.peek().entry.key(), key) == 0) { + IndexedEntry removedEntry = queue.remove(); + int iteratorId = removedEntry.iteratorId; + addEntryByIteratorIdSafe(iteratorId); + } + } + + private void cleanByNull() { + while (!queue.isEmpty()) { + E entry = queue.peek().entry; + if (entry.value() != null) { + break; + } + cleanByKey(entry.key()); + } + } + + private void addEntryByIteratorIdSafe(int iteratorId) { + Iterator iteratorById = getIteratorById(iteratorId); + if (iteratorById.hasNext()) { + E next = iteratorById.next(); + queue.add(new IndexedEntry(iteratorId, next)); + } + } + + private Iterator getIteratorById(int id) { + if (id == IN_MEMORY_ITERATOR_ID) { + return inMemoryIterator; + } + return storageIterators.get(id); + } + } + + private class IndexedEntry implements Comparable { + final int iteratorId; + final E entry; + + public IndexedEntry(int iteratorId, E entry) { + this.iteratorId = iteratorId; + this.entry = entry; + } + + @Override + public int compareTo(IndexedEntry other) { + int compared = comparator.compare(entry.key(), other.entry.key()); + if (compared != 0) { + return compared; + } + return -Integer.compare(iteratorId, other.iteratorId); + } + } + + private class StorageIterator implements Iterator { + private final MemorySegment storageSegment; + private final long end; + private long start; + + public StorageIterator(MemorySegment storageSegment, MemorySegment offsetsSegment, D from, D to) { + this.storageSegment = storageSegment; + + if (offsetsSegment.byteSize() == 0) { + this.start = -1; + this.end = -1; + } else { + this.start = calculateStartPosition(offsetsSegment, from); + this.end = calculateEndPosition(offsetsSegment, to); + } + } + + private long calculateStartPosition(MemorySegment offsetsSegment, D from) { + if (from == null) { + return getFirstOffset(offsetsSegment); + } + long lowerBoundOffset = findLowerBoundOffset(from, storageSegment, offsetsSegment); + if (lowerBoundOffset == -1) { + // from the smallest element and doesn't exist + return getFirstOffset(offsetsSegment); + } else { + // storage[lowerBoundOffset] <= from, we need >= from only + return moveOffsetIfFirstKeyAreNotEqual(from, lowerBoundOffset); + } + } + + private long calculateEndPosition(MemorySegment 
offsetsSegment, D to) { + if (to == null) { + return getEndOffset(); + } + long lowerBoundOffset = findLowerBoundOffset(to, storageSegment, offsetsSegment); + if (lowerBoundOffset == -1) { + // to the smallest element and doesn't exist + return getFirstOffset(offsetsSegment); + } + // storage[lowerBoundOffset] <= to, we need >= to only + return moveOffsetIfFirstKeyAreNotEqual(to, lowerBoundOffset); + } + + private long getFirstOffset(MemorySegment offsetsSegment) { + return offsetsSegment.getAtIndex(LONG_LAYOUT, 0); + } + + private long getEndOffset() { + return storageSegment.byteSize(); + } + + private long moveOffsetIfFirstKeyAreNotEqual(D from, long lowerBoundOffset) { + long offset = lowerBoundOffset; + D lowerBoundKey = readValue(storageSegment, offset); + if (comparator.compare(lowerBoundKey, from) != 0) { + offset += SIZE_LENGTH; + offset += serializer.size(lowerBoundKey); + D lowerBoundValue = readValue(storageSegment, offset); + offset += SIZE_LENGTH; + offset += serializer.size(lowerBoundValue); + } + return offset; + } + + @Override + public boolean hasNext() { + return start < end; + } + + @Override + public E next() { + D key = readValue(storageSegment, start); + start += SIZE_LENGTH; + start += serializer.size(key); + D value = readValue(storageSegment, start); + start += SIZE_LENGTH; + start += serializer.size(value); + return serializer.createEntry(key, value); + } + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java index c3311f653..3f48b9981 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java @@ -1,305 +1,44 @@ package ru.vk.itmo.kovalchukvladislav; -import ru.vk.itmo.Config; import ru.vk.itmo.Dao; import ru.vk.itmo.Entry; -import java.io.IOException; -import java.lang.foreign.Arena; -import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.OpenOption; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; -import java.util.Objects; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; -/** - * Saves state (two files) when closed in config.basePath()
    directory. - * - *
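The DaoIterator added in the patch above is a k-way merge: one already-sorted source per SSTable plus the in-memory map feed a PriorityQueue ordered by key and then by source recency, equal keys are collapsed so the freshest source wins, and entries whose value is null (tombstones) are dropped. A self-contained sketch of the same merge over plain String entries (illustrative only, not the patch's code; the tombstone-filtering step is omitted here):

    // Sketch: lazy k-way merge of sorted iterators. Sources are ordered oldest -> newest,
    // so on equal keys the entry from the higher-indexed (newer) source is kept.
    // Assumes: import java.util.*;
    static Iterator<Map.Entry<String, String>> mergeSorted(List<Iterator<Map.Entry<String, String>>> sources) {
        record Head(int sourceId, Map.Entry<String, String> entry) {}
        PriorityQueue<Head> heap = new PriorityQueue<>(
                Comparator.comparing((Head h) -> h.entry().getKey())
                        .thenComparing(Comparator.comparingInt(Head::sourceId).reversed()));
        for (int i = 0; i < sources.size(); i++) {
            if (sources.get(i).hasNext()) {
                heap.add(new Head(i, sources.get(i).next()));
            }
        }
        return new Iterator<>() {
            @Override
            public boolean hasNext() {
                return !heap.isEmpty();
            }

            @Override
            public Map.Entry<String, String> next() {
                if (heap.isEmpty()) {
                    throw new NoSuchElementException();
                }
                Head winner = heap.remove();           // smallest key, newest source holding it
                advance(winner.sourceId());
                while (!heap.isEmpty() && heap.peek().entry().getKey().equals(winner.entry().getKey())) {
                    advance(heap.remove().sourceId()); // discard stale duplicates of the same key
                }
                return winner.entry();
            }

            private void advance(int sourceId) {
                if (sources.get(sourceId).hasNext()) {
                    heap.add(new Head(sourceId, sources.get(sourceId).next()));
                }
            }
        };
    }

Ordering the queue by source recency as the tie-breaker is what lets a newer SSTable, or the in-memory map (which the patch gives the highest iterator id), shadow older versions of the same key without materializing anything.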

    State contains two files: - *

  • - * db with sorted by key entries - *
  • - *
  • - * offsets with keys offsets at db(in bytes) - *
  • - * - *

    This allows use binary search after db reading - */ public abstract class AbstractInMemoryDao> implements Dao { - private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; - private static final String DB_FILENAME_PREFIX = "db_"; - private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; - private static final String METADATA_FILENAME = "metadata"; - private static final int SIZE_LENGTH = Long.BYTES; + protected final ConcurrentNavigableMap dao; + protected final Comparator comparator; - private final Path basePath; - private final Path metadataPath; - private final Arena arena = Arena.ofShared(); - private final Comparator comparator; - private final ConcurrentNavigableMap dao; - private final MemorySegmentSerializer serializer; - - private final int storagesCount; - private final FileChannel[] dbFileChannels; - private final FileChannel[] offsetChannels; - private final MemorySegment[] dbMappedSegments; - private final MemorySegment[] offsetMappedSegments; - - protected AbstractInMemoryDao(Config config, - Comparator comparator, - MemorySegmentSerializer serializer) throws IOException { - this.comparator = comparator; - this.serializer = serializer; + protected AbstractInMemoryDao(Comparator comparator) { this.dao = new ConcurrentSkipListMap<>(comparator); - this.basePath = Objects.requireNonNull(config.basePath()); - Files.createDirectories(basePath); - this.metadataPath = basePath.resolve(METADATA_FILENAME); - - this.storagesCount = getCountFromMetadataOrCreate(); - this.dbFileChannels = new FileChannel[storagesCount]; - this.offsetChannels = new FileChannel[storagesCount]; - this.dbMappedSegments = new MemorySegment[storagesCount]; - this.offsetMappedSegments = new MemorySegment[storagesCount]; - - for (int i = 0; i < storagesCount; i++) { - readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbFileChannels, dbMappedSegments); - readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetChannels, offsetMappedSegments); - } - } - - private int getCountFromMetadataOrCreate() throws IOException { - if (!Files.exists(metadataPath)) { - Files.writeString(metadataPath, "0", StandardOpenOption.WRITE, StandardOpenOption.CREATE); - return 0; - } - return Integer.parseInt(Files.readString(metadataPath)); - } - - private void readFileAndMapToSegment(String filenamePrefix, int index, - FileChannel[] dstChannel, - MemorySegment[] dstSegment) throws IOException { - Path path = basePath.resolve(filenamePrefix + index); - - FileChannel fileChannel = FileChannel.open(path, StandardOpenOption.READ); - MemorySegment mappedSegment = fileChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); - - dstChannel[index] = fileChannel; - dstSegment[index] = mappedSegment; + this.comparator = comparator; } @Override public Iterator get(D from, D to) { + ConcurrentNavigableMap subMap; if (from == null && to == null) { - return all(); + subMap = dao; } else if (from == null) { - return allTo(to); + subMap = dao.headMap(to); } else if (to == null) { - return allFromUnsafe(from); + subMap = dao.tailMap(from); + } else { + subMap = dao.subMap(from, to); } - return dao.subMap(from, to).values().iterator(); + return subMap.values().iterator(); } @Override public E get(D key) { - E e = dao.get(key); - if (e != null) { - return e.value() == null ? null : e; - } - for (int i = storagesCount - 1; i >= 0; i--) { - E fromFile = findInStorage(key, i); - if (fromFile != null) { - return fromFile.value() == null ? 
null : fromFile; - } - } - return null; + return dao.get(key); } @Override public void upsert(E entry) { dao.put(entry.key(), entry); } - - @Override - public Iterator allFrom(D from) { - return from == null ? all() : allFromUnsafe(from); - } - - /** - * Doesn't check the argument for null. Should be called only if there was a check before - * - * @param from NotNull lower bound of range (inclusive) - * @return entries with key >= from - */ - private Iterator allFromUnsafe(D from) { - return dao.tailMap(from).values().iterator(); - } - - @Override - public Iterator allTo(D to) { - return to == null ? all() : allToUnsafe(to); - } - - /** - * Doesn't check the argument for null. Should be called only if there was a check before - * - * @param to NotNull upper bound of range (exclusive) - * @return entries with key < to - */ - private Iterator allToUnsafe(D to) { - return dao.headMap(to).values().iterator(); - } - - @Override - public Iterator all() { - return dao.values().iterator(); - } - - @Override - public synchronized void flush() throws IOException { - if (!dao.isEmpty()) { - writeData(); - Files.writeString(metadataPath, String.valueOf(storagesCount + 1)); - } - } - - @Override - public synchronized void close() throws IOException { - if (arena.scope().isAlive()) { - arena.close(); - } - flush(); - closeChannels(dbFileChannels); - closeChannels(offsetChannels); - } - - private void closeChannels(FileChannel[] channels) throws IOException { - for (FileChannel channel : channels) { - if (channel.isOpen()) { - channel.close(); - } - } - } - - // =================================== - // Reading values - // =================================== - - private D readValue(MemorySegment memorySegment, long offset) { - long size = memorySegment.get(LONG_LAYOUT, offset); - if (size == 0) { - return null; - } - MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); - return serializer.toValue(valueSegment); - } - - // Return new offset - private long writeValue(D value, MemorySegment memorySegment, long offset) { - MemorySegment valueSegment = serializer.fromValue(value); - long size = valueSegment.byteSize(); - memorySegment.set(LONG_LAYOUT, offset, size); - if (size != 0) { - MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); - } - return offset + SIZE_LENGTH + size; - } - - private E findInStorage(D key, int index) { - MemorySegment storage = dbMappedSegments[index]; - MemorySegment offsets = offsetMappedSegments[index]; - - long upperBoundOffset = findUpperBoundOffset(key, storage, offsets); - if (upperBoundOffset == -1) { - return null; - } - D upperBoundKey = readValue(storage, upperBoundOffset); - if (comparator.compare(upperBoundKey, key) == 0) { - D value = readValue(storage, upperBoundOffset + SIZE_LENGTH + serializer.size(upperBoundKey)); - return serializer.createEntry(upperBoundKey, value); - } - return null; - } - - /** - * Returns offset that storage.get(LONG_LAYOUT, offset).key() >= key
    - * -1 otherwise - */ - private long findUpperBoundOffset(D key, MemorySegment storage, MemorySegment offsets) { - long entriesCount = offsets.byteSize() / SIZE_LENGTH; - long left = -1; - long right = entriesCount; - - while (left + 1 < right) { - long middle = left + (right - left) / 2; - long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); - D middleKey = readValue(storage, middleOffset); - - if (comparator.compare(middleKey, key) < 0) { - left = middle; - } else { - right = middle; - } - } - if (right == entriesCount) { - return -1; - } - return offsets.getAtIndex(LONG_LAYOUT, right); - } - - // =================================== - // Writing offsets and data - // =================================== - - private void writeData() throws IOException { - Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); - Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); - - OpenOption[] options = new OpenOption[] { - StandardOpenOption.READ, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.CREATE - }; - - try (FileChannel db = FileChannel.open(dbPath, options); - FileChannel offsets = FileChannel.open(offsetsPath, options); - Arena arena = Arena.ofConfined()) { - - long dbSize = getDAOBytesSize(); - long offsetsSize = (long) dao.size() * Long.BYTES; - MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, arena); - MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, arena); - - int i = 0; - long offset = 0; - for (E entry : dao.values()) { - offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); - i += 1; - - offset = writeValue(entry.key(), fileSegment, offset); - offset = writeValue(entry.value(), fileSegment, offset); - } - fileSegment.load(); - offsetsSegment.load(); - } - } - - private long getDAOBytesSize() { - long size = 0; - for (E entry : dao.values()) { - size += getEntryBytesSize(entry); - } - return size; - } - - private long getEntryBytesSize(E entry) { - return 2 * SIZE_LENGTH + serializer.size(entry.key()) + serializer.size(entry.value()); - } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java new file mode 100644 index 000000000..82be82396 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java @@ -0,0 +1,29 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.BaseEntry; +import ru.vk.itmo.Entry; +import java.lang.foreign.MemorySegment; + +public class IdentityMemorySegmentSerializer implements MemorySegmentSerializer> { + public static final IdentityMemorySegmentSerializer INSTANCE = new IdentityMemorySegmentSerializer(); + + @Override + public MemorySegment toValue(MemorySegment input) { + return input; + } + + @Override + public MemorySegment fromValue(MemorySegment value) { + return value; + } + + @Override + public long size(MemorySegment value) { + return value == null ? 
0 : value.byteSize(); + } + + @Override + public Entry createEntry(MemorySegment key, MemorySegment value) { + return new BaseEntry<>(key, value); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index 9a7fd8372..99313772a 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -1,42 +1,14 @@ package ru.vk.itmo.kovalchukvladislav; -import ru.vk.itmo.BaseEntry; import ru.vk.itmo.Config; import ru.vk.itmo.Entry; import java.io.IOException; import java.lang.foreign.MemorySegment; -public class MemorySegmentDao extends AbstractInMemoryDao> { - private static final Serializer MEMORY_SEGMENT_SERIALIZER = new Serializer(); +public class MemorySegmentDao extends AbstractBasedOnSSTableDao> { public MemorySegmentDao(Config config) throws IOException { - super(config, MemorySegmentComparator.INSTANCE, MEMORY_SEGMENT_SERIALIZER); - } - - private static class Serializer implements MemorySegmentSerializer> { - @Override - public MemorySegment toValue(MemorySegment input) { - return input; - } - - @Override - public MemorySegment fromValue(MemorySegment value) { - return value; - } - - @Override - public long size(MemorySegment value) { - if (value == null) { - return 0; - } - return value.byteSize(); - } - - @Override - public Entry createEntry(MemorySegment key, MemorySegment value) { - return new BaseEntry<>(key, value); - } + super(config, MemorySegmentComparator.INSTANCE, IdentityMemorySegmentSerializer.INSTANCE); } } - diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java deleted file mode 100644 index 935cc6cff..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoEntry.java +++ /dev/null @@ -1,9 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav.model; - -import ru.vk.itmo.Entry; - -public interface DaoEntry> extends Comparable> { - E getEntry(); - - DaoStorage storage(); -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java deleted file mode 100644 index 783b92033..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoStorage.java +++ /dev/null @@ -1,9 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav.model; - -import ru.vk.itmo.Entry; - -public interface DaoStorage> { - DaoEntry currentEntry(); - - DaoEntry nextEntry(); -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java deleted file mode 100644 index fce02fe1d..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoEntry.java +++ /dev/null @@ -1,31 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav.model; - -import ru.vk.itmo.Entry; -import java.util.Comparator; - -public class InMemoryDaoEntry> implements DaoEntry { - private final E currentEntry; - private final InMemoryDaoStorage storage; - private final Comparator comparator; - - public InMemoryDaoEntry(E entry, InMemoryDaoStorage storage, Comparator comparator) { - this.currentEntry = entry; - this.storage = storage; - this.comparator = comparator; - } - - @Override - public E getEntry() { - return currentEntry; - } - - @Override - public DaoStorage storage() { - return storage; - } - - @Override - public int compareTo(DaoEntry other) { - return 
comparator.compare(currentEntry.key(), other.getEntry().key()); - } -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java deleted file mode 100644 index 54ea19a04..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/InMemoryDaoStorage.java +++ /dev/null @@ -1,33 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav.model; - -import ru.vk.itmo.Entry; -import java.util.Comparator; -import java.util.Iterator; -import java.util.NavigableMap; - -public class InMemoryDaoStorage> implements DaoStorage { - private final Iterator iterator; - private InMemoryDaoEntry currentEntry; - private final Comparator comparator; - - public InMemoryDaoStorage(NavigableMap map, Comparator comparator) { - this.iterator = map.values().iterator(); - this.comparator = comparator; - this.currentEntry = new InMemoryDaoEntry<>(iterator.next(), this, comparator); - } - - @Override - public DaoEntry currentEntry() { - return currentEntry; - } - - @Override - public DaoEntry nextEntry() { - if (!iterator.hasNext()) { - return null; - } - InMemoryDaoEntry nextEntry = new InMemoryDaoEntry<>(iterator.next(), this, comparator); - currentEntry = nextEntry; - return nextEntry; - } -} diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 6333021bf..c96427e2f 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,7 +12,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory(stage = 2) +@DaoFactory(stage = 3) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; From 535d4025425db78299a11e2360953ef8800c7e60 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 18 Oct 2023 23:58:00 +0300 Subject: [PATCH 16/36] i hate codeclimate --- .../AbstractBasedOnSSTableDao.java | 290 ++---------------- .../IdentityMemorySegmentSerializer.java | 29 -- .../MemorySegmentComparator.java | 26 -- .../kovalchukvladislav/MemorySegmentDao.java | 3 +- .../MemorySegmentEntryExtractor.java | 105 +++++++ .../MemorySegmentSerializer.java | 14 - .../kovalchukvladislav/model/DaoIterator.java | 193 ++++++++++++ .../model/EntryExtractor.java | 23 ++ 8 files changed, 349 insertions(+), 334 deletions(-) delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/EntryExtractor.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index fa8ca9d91..ca7033c0d 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java 
@@ -2,6 +2,8 @@ import ru.vk.itmo.Config; import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.model.DaoIterator; +import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; import java.io.IOException; import java.lang.foreign.Arena; @@ -13,12 +15,9 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; -import java.util.Comparator; import java.util.Iterator; import java.util.List; -import java.util.NoSuchElementException; import java.util.Objects; -import java.util.PriorityQueue; public abstract class AbstractBasedOnSSTableDao> extends AbstractInMemoryDao { // =================================== @@ -28,8 +27,6 @@ public abstract class AbstractBasedOnSSTableDao> extends A private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; private static final String METADATA_FILENAME = "metadata"; private static final String DB_FILENAME_PREFIX = "db_"; - private static final int SIZE_LENGTH = Long.BYTES; - private static final long VALUE_IS_NULL_SIZE = -1; // =================================== // Variables @@ -38,7 +35,7 @@ public abstract class AbstractBasedOnSSTableDao> extends A private final Path basePath; private final Path metadataPath; private final Arena arena = Arena.ofShared(); - private final MemorySegmentSerializer serializer; + private final EntryExtractor extractor; // =================================== // Storages @@ -50,11 +47,9 @@ public abstract class AbstractBasedOnSSTableDao> extends A private final List dbMappedSegments; private final List offsetMappedSegments; - protected AbstractBasedOnSSTableDao(Config config, - Comparator comparator, - MemorySegmentSerializer serializer) throws IOException { - super(comparator); - this.serializer = serializer; + protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extractor) throws IOException { + super(extractor); + this.extractor = extractor; this.basePath = Objects.requireNonNull(config.basePath()); if (!Files.exists(basePath)) { @@ -89,10 +84,12 @@ private void readFileAndMapToSegment(String filenamePrefix, int index, List channels, List segments) throws IOException { Path path = basePath.resolve(filenamePrefix + index); - FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); - MemorySegment segment = channel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); - channels.add(channel); - segments.add(segment); + try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { + + MemorySegment segment = channel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); + channels.add(channel); + segments.add(segment); + } } // =================================== @@ -101,11 +98,7 @@ private void readFileAndMapToSegment(String filenamePrefix, int index, @Override public Iterator get(D from, D to) { Iterator inMemotyIterator = super.get(from, to); - List storageIterators = new ArrayList<>(storagesCount); - for (int i = 0; i < storagesCount; i++) { - storageIterators.add(new StorageIterator(dbMappedSegments.get(i), offsetMappedSegments.get(i), from, to)); - } - return new DaoIterator(inMemotyIterator, storageIterators); + return new DaoIterator<>(from, to, inMemotyIterator, dbMappedSegments, offsetMappedSegments, extractor); } @Override @@ -123,72 +116,24 @@ private E findInStorages(D key) { MemorySegment storage = dbMappedSegments.get(i); MemorySegment offsets = offsetMappedSegments.get(i); - long lowerBoundOffset = findLowerBoundOffset(key, storage, offsets); - if (lowerBoundOffset == -1) { + long offset = 
extractor.findLowerBoundValueOffset(key, storage, offsets); + if (offset == -1) { continue; } - D lowerBoundKey = readValue(storage, lowerBoundOffset); + D lowerBoundKey = extractor.readValue(storage, offset); + if (comparator.compare(lowerBoundKey, key) == 0) { - D value = readValue(storage, lowerBoundOffset + SIZE_LENGTH + serializer.size(lowerBoundKey)); - return serializer.createEntry(lowerBoundKey, value); + long valueOffset = offset + extractor.size(lowerBoundKey); + D value = extractor.readValue(storage, valueOffset); + return extractor.createEntry(lowerBoundKey, value); } } return null; } - /** - * Returns the greater offset that storage.get(LONG_LAYOUT, offset).key() <= key
    - * -1 otherwise - */ - private long findLowerBoundOffset(D key, MemorySegment storage, MemorySegment offsets) { - long entriesCount = offsets.byteSize() / SIZE_LENGTH; - long left = -1; - long right = entriesCount; - - while (left + 1 < right) { - long middle = left + (right - left) / 2; - long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); - D middleKey = readValue(storage, middleOffset); - - if (comparator.compare(middleKey, key) <= 0) { - left = middle; - } else { - right = middle; - } - } - return left == -1 ? -1 : offsets.getAtIndex(LONG_LAYOUT, left); - } - // =================================== - // Reading values + // Writing data // =================================== - - private D readValue(MemorySegment memorySegment, long offset) { - long size = memorySegment.get(LONG_LAYOUT, offset); - if (size == VALUE_IS_NULL_SIZE) { - return null; - } - MemorySegment valueSegment = memorySegment.asSlice(offset + SIZE_LENGTH, size); - return serializer.toValue(valueSegment); - } - - // Return new offset - private long writeValue(D value, MemorySegment memorySegment, long offset) { - MemorySegment valueSegment = serializer.fromValue(value); - if (valueSegment == null) { - memorySegment.set(LONG_LAYOUT, offset, VALUE_IS_NULL_SIZE); - return offset + SIZE_LENGTH; - } - long size = valueSegment.byteSize(); - memorySegment.set(LONG_LAYOUT, offset, size); - MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); - return offset + SIZE_LENGTH + size; - } - - // =================================== - // Writing offsets and data - // =================================== - private void writeData() throws IOException { Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); @@ -202,21 +147,19 @@ private void writeData() throws IOException { try (FileChannel db = FileChannel.open(dbPath, options); FileChannel offsets = FileChannel.open(offsetsPath, options); - Arena confined = Arena.ofConfined()) { + Arena confinedArena = Arena.ofConfined()) { long dbSize = getDAOBytesSize(); long offsetsSize = (long) dao.size() * Long.BYTES; - MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, confined); - MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, confined); + MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, confinedArena); + MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, confinedArena); int i = 0; long offset = 0; for (E entry : dao.values()) { offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); i += 1; - - offset = writeValue(entry.key(), fileSegment, offset); - offset = writeValue(entry.value(), fileSegment, offset); + offset = extractor.writeEntry(entry, fileSegment, offset); } fileSegment.load(); offsetsSegment.load(); @@ -226,19 +169,14 @@ private void writeData() throws IOException { private long getDAOBytesSize() { long size = 0; for (E entry : dao.values()) { - size += getEntryBytesSize(entry); + size += extractor.size(entry); } return size; } - private long getEntryBytesSize(E entry) { - return 2 * SIZE_LENGTH + serializer.size(entry.key()) + serializer.size(entry.value()); - } - // =================================== - // Close and flush + // Flush and close // =================================== - @Override public synchronized void flush() throws IOException { if (!dao.isEmpty()) { @@ -264,178 +202,4 @@ private void closeChannels(List 
channels) throws IOException { } } } - - // =================================== - // Iterators - // =================================== - - private class DaoIterator implements Iterator { - private static final Integer IN_MEMORY_ITERATOR_ID = Integer.MAX_VALUE; - private final Iterator inMemoryIterator; - private final List storageIterators; - private final PriorityQueue queue; - - public DaoIterator(Iterator inMemoryIterator, List storageIterators) { - this.inMemoryIterator = inMemoryIterator; - this.storageIterators = storageIterators; - this.queue = new PriorityQueue<>(1 + storageIterators.size()); - - addEntryByIteratorIdSafe(IN_MEMORY_ITERATOR_ID); - for (int i = 0; i < storageIterators.size(); i++) { - addEntryByIteratorIdSafe(i); - } - cleanByNull(); - } - - @Override - public boolean hasNext() { - return !queue.isEmpty(); - } - - @Override - public E next() { - if (queue.isEmpty()) { - throw new NoSuchElementException(); - } - IndexedEntry minElement = queue.peek(); - E minEntry = minElement.entry; - cleanByKey(minElement.entry.key()); - cleanByNull(); - return minEntry; - } - - private void cleanByKey(D key) { - while (!queue.isEmpty() && comparator.compare(queue.peek().entry.key(), key) == 0) { - IndexedEntry removedEntry = queue.remove(); - int iteratorId = removedEntry.iteratorId; - addEntryByIteratorIdSafe(iteratorId); - } - } - - private void cleanByNull() { - while (!queue.isEmpty()) { - E entry = queue.peek().entry; - if (entry.value() != null) { - break; - } - cleanByKey(entry.key()); - } - } - - private void addEntryByIteratorIdSafe(int iteratorId) { - Iterator iteratorById = getIteratorById(iteratorId); - if (iteratorById.hasNext()) { - E next = iteratorById.next(); - queue.add(new IndexedEntry(iteratorId, next)); - } - } - - private Iterator getIteratorById(int id) { - if (id == IN_MEMORY_ITERATOR_ID) { - return inMemoryIterator; - } - return storageIterators.get(id); - } - } - - private class IndexedEntry implements Comparable { - final int iteratorId; - final E entry; - - public IndexedEntry(int iteratorId, E entry) { - this.iteratorId = iteratorId; - this.entry = entry; - } - - @Override - public int compareTo(IndexedEntry other) { - int compared = comparator.compare(entry.key(), other.entry.key()); - if (compared != 0) { - return compared; - } - return -Integer.compare(iteratorId, other.iteratorId); - } - } - - private class StorageIterator implements Iterator { - private final MemorySegment storageSegment; - private final long end; - private long start; - - public StorageIterator(MemorySegment storageSegment, MemorySegment offsetsSegment, D from, D to) { - this.storageSegment = storageSegment; - - if (offsetsSegment.byteSize() == 0) { - this.start = -1; - this.end = -1; - } else { - this.start = calculateStartPosition(offsetsSegment, from); - this.end = calculateEndPosition(offsetsSegment, to); - } - } - - private long calculateStartPosition(MemorySegment offsetsSegment, D from) { - if (from == null) { - return getFirstOffset(offsetsSegment); - } - long lowerBoundOffset = findLowerBoundOffset(from, storageSegment, offsetsSegment); - if (lowerBoundOffset == -1) { - // from the smallest element and doesn't exist - return getFirstOffset(offsetsSegment); - } else { - // storage[lowerBoundOffset] <= from, we need >= from only - return moveOffsetIfFirstKeyAreNotEqual(from, lowerBoundOffset); - } - } - - private long calculateEndPosition(MemorySegment offsetsSegment, D to) { - if (to == null) { - return getEndOffset(); - } - long lowerBoundOffset = 
findLowerBoundOffset(to, storageSegment, offsetsSegment); - if (lowerBoundOffset == -1) { - // to the smallest element and doesn't exist - return getFirstOffset(offsetsSegment); - } - // storage[lowerBoundOffset] <= to, we need >= to only - return moveOffsetIfFirstKeyAreNotEqual(to, lowerBoundOffset); - } - - private long getFirstOffset(MemorySegment offsetsSegment) { - return offsetsSegment.getAtIndex(LONG_LAYOUT, 0); - } - - private long getEndOffset() { - return storageSegment.byteSize(); - } - - private long moveOffsetIfFirstKeyAreNotEqual(D from, long lowerBoundOffset) { - long offset = lowerBoundOffset; - D lowerBoundKey = readValue(storageSegment, offset); - if (comparator.compare(lowerBoundKey, from) != 0) { - offset += SIZE_LENGTH; - offset += serializer.size(lowerBoundKey); - D lowerBoundValue = readValue(storageSegment, offset); - offset += SIZE_LENGTH; - offset += serializer.size(lowerBoundValue); - } - return offset; - } - - @Override - public boolean hasNext() { - return start < end; - } - - @Override - public E next() { - D key = readValue(storageSegment, start); - start += SIZE_LENGTH; - start += serializer.size(key); - D value = readValue(storageSegment, start); - start += SIZE_LENGTH; - start += serializer.size(value); - return serializer.createEntry(key, value); - } - } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java deleted file mode 100644 index 82be82396..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/IdentityMemorySegmentSerializer.java +++ /dev/null @@ -1,29 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav; - -import ru.vk.itmo.BaseEntry; -import ru.vk.itmo.Entry; -import java.lang.foreign.MemorySegment; - -public class IdentityMemorySegmentSerializer implements MemorySegmentSerializer> { - public static final IdentityMemorySegmentSerializer INSTANCE = new IdentityMemorySegmentSerializer(); - - @Override - public MemorySegment toValue(MemorySegment input) { - return input; - } - - @Override - public MemorySegment fromValue(MemorySegment value) { - return value; - } - - @Override - public long size(MemorySegment value) { - return value == null ? 
0 : value.byteSize(); - } - - @Override - public Entry createEntry(MemorySegment key, MemorySegment value) { - return new BaseEntry<>(key, value); - } -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java deleted file mode 100644 index d5fc40e4a..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentComparator.java +++ /dev/null @@ -1,26 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav; - -import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; -import java.util.Comparator; - -public class MemorySegmentComparator implements Comparator { - public static final MemorySegmentComparator INSTANCE = new MemorySegmentComparator(); - private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; - - @Override - public int compare(MemorySegment a, MemorySegment b) { - long diffIndex = a.mismatch(b); - if (diffIndex == -1) { - return 0; - } else if (diffIndex == a.byteSize()) { - return -1; - } else if (diffIndex == b.byteSize()) { - return 1; - } - - byte byteA = a.getAtIndex(VALUE_LAYOUT, diffIndex); - byte byteB = b.getAtIndex(VALUE_LAYOUT, diffIndex); - return Byte.compare(byteA, byteB); - } -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java index 99313772a..3c94eb639 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentDao.java @@ -7,8 +7,7 @@ import java.lang.foreign.MemorySegment; public class MemorySegmentDao extends AbstractBasedOnSSTableDao> { - public MemorySegmentDao(Config config) throws IOException { - super(config, MemorySegmentComparator.INSTANCE, IdentityMemorySegmentSerializer.INSTANCE); + super(config, MemorySegmentEntryExtractor.INSTANCE); } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java new file mode 100644 index 000000000..7d9df9968 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java @@ -0,0 +1,105 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.BaseEntry; +import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; + +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; + +public class MemorySegmentEntryExtractor implements EntryExtractor> { + public static final MemorySegmentEntryExtractor INSTANCE = new MemorySegmentEntryExtractor(); + private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; + private static final ValueLayout.OfByte BYTE_LAYOUT = ValueLayout.JAVA_BYTE; + private static final int SIZE_LENGTH = Long.BYTES; + private static final long VALUE_IS_NULL_SIZE = -1; + + @Override + public MemorySegment readValue(MemorySegment memorySegment, long offset) { + long size = memorySegment.get(LONG_LAYOUT, offset); + if (size == VALUE_IS_NULL_SIZE) { + return null; + } + return memorySegment.asSlice(offset + SIZE_LENGTH, size); + } + + @Override + public long writeValue(MemorySegment valueSegment, MemorySegment memorySegment, long offset) { + if (valueSegment == null) { + memorySegment.set(LONG_LAYOUT, offset, VALUE_IS_NULL_SIZE); + return offset + SIZE_LENGTH; + } + long size = valueSegment.byteSize(); + memorySegment.set(LONG_LAYOUT, offset, size); + 
MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); + return offset + SIZE_LENGTH + size; + } + + @Override + public Entry readEntry(MemorySegment memorySegment, long offset) { + MemorySegment key = readValue(memorySegment, offset); + long valueOffset = offset + size(key); + MemorySegment value = readValue(memorySegment, valueOffset); + return new BaseEntry<>(key, value); + } + + @Override + public long writeEntry(Entry entry, MemorySegment memorySegment, long offset) { + long valueOffset = writeValue(entry.key(), memorySegment, offset); + return writeValue(entry.value(), memorySegment, valueOffset); + } + + @Override + public long findLowerBoundValueOffset(MemorySegment key, MemorySegment storage, MemorySegment offsets) { + long entriesCount = offsets.byteSize() / SIZE_LENGTH; + long left = -1; + long right = entriesCount; + + while (left + 1 < right) { + long middle = left + (right - left) / 2; + long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); + MemorySegment middleKey = readValue(storage, middleOffset); + + if (compare(middleKey, key) <= 0) { + left = middle; + } else { + right = middle; + } + } + return left == -1 ? -1 : offsets.getAtIndex(LONG_LAYOUT, left); + } + + @Override + public long size(MemorySegment value) { + if (value == null) { + return SIZE_LENGTH; + } + return SIZE_LENGTH + value.byteSize(); + } + + @Override + public long size(Entry entry) { + return size(entry.key()) + size(entry.value()); + } + + @Override + public Entry createEntry(MemorySegment key, MemorySegment value) { + return new BaseEntry<>(key, value); + } + + @Override + public int compare(MemorySegment a, MemorySegment b) { + long diffIndex = a.mismatch(b); + if (diffIndex == -1) { + return 0; + } else if (diffIndex == a.byteSize()) { + return -1; + } else if (diffIndex == b.byteSize()) { + return 1; + } + + byte byteA = a.getAtIndex(BYTE_LAYOUT, diffIndex); + byte byteB = b.getAtIndex(BYTE_LAYOUT, diffIndex); + return Byte.compare(byteA, byteB); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java deleted file mode 100644 index bb947ebed..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java +++ /dev/null @@ -1,14 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav; - -import ru.vk.itmo.Entry; -import java.lang.foreign.MemorySegment; - -public interface MemorySegmentSerializer> { - D toValue(MemorySegment input); - - MemorySegment fromValue(D value); - - long size(D value); - - E createEntry(D key, D value); -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java new file mode 100644 index 000000000..91eea3ba3 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java @@ -0,0 +1,193 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; + +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.PriorityQueue; + +public class DaoIterator> implements Iterator { + private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; + private static final Integer IN_MEMORY_ITERATOR_ID = Integer.MAX_VALUE; + private final Iterator inMemoryIterator; + private final EntryExtractor extractor; + private final 
PriorityQueue queue; + private final List storageIterators; + + public DaoIterator(D from, D to, + Iterator inMemoryIterator, + List storageSegments, + List offsetsSegments, + EntryExtractor extractor) { + this.extractor = extractor; + this.inMemoryIterator = inMemoryIterator; + this.storageIterators = getStorageIterators(from, to, storageSegments, offsetsSegments); + this.queue = new PriorityQueue<>(1 + storageIterators.size()); + + addEntryByIteratorIdSafe(IN_MEMORY_ITERATOR_ID); + for (int i = 0; i < storageIterators.size(); i++) { + addEntryByIteratorIdSafe(i); + } + cleanByNull(); + } + + private List getStorageIterators(D from, D to, + List storageSegments, + List offsetsSegments) { + int storagesCount = storageSegments.size(); + final List storageIterators = new ArrayList<>(storagesCount); + for (int i = 0; i < storagesCount; i++) { + storageIterators.add(new StorageIterator(storageSegments.get(i), offsetsSegments.get(i), from, to)); + } + return storageIterators; + } + + @Override + public boolean hasNext() { + return !queue.isEmpty(); + } + + @Override + public E next() { + if (queue.isEmpty()) { + throw new NoSuchElementException(); + } + IndexedEntry minElement = queue.peek(); + E minEntry = minElement.entry; + cleanByKey(minElement.entry.key()); + cleanByNull(); + return minEntry; + } + + private void cleanByKey(D key) { + while (!queue.isEmpty() && extractor.compare(queue.peek().entry.key(), key) == 0) { + IndexedEntry removedEntry = queue.remove(); + int iteratorId = removedEntry.iteratorId; + addEntryByIteratorIdSafe(iteratorId); + } + } + + private void cleanByNull() { + while (!queue.isEmpty()) { + E entry = queue.peek().entry; + if (entry.value() != null) { + break; + } + cleanByKey(entry.key()); + } + } + + private void addEntryByIteratorIdSafe(int iteratorId) { + Iterator iteratorById = getIteratorById(iteratorId); + if (iteratorById.hasNext()) { + E next = iteratorById.next(); + queue.add(new IndexedEntry(iteratorId, next)); + } + } + + private Iterator getIteratorById(int id) { + if (id == IN_MEMORY_ITERATOR_ID) { + return inMemoryIterator; + } + return storageIterators.get(id); + } + + private class IndexedEntry implements Comparable { + final int iteratorId; + final E entry; + + public IndexedEntry(int iteratorId, E entry) { + this.iteratorId = iteratorId; + this.entry = entry; + } + + @Override + public int compareTo(IndexedEntry other) { + int compared = extractor.compare(entry.key(), other.entry.key()); + if (compared != 0) { + return compared; + } + return -Integer.compare(iteratorId, other.iteratorId); + } + } + + private class StorageIterator implements Iterator { + private final MemorySegment storageSegment; + private final long end; + private long start; + + public StorageIterator(MemorySegment storageSegment, MemorySegment offsetsSegment, D from, D to) { + this.storageSegment = storageSegment; + + if (offsetsSegment.byteSize() == 0) { + this.start = -1; + this.end = -1; + } else { + this.start = calculateStartPosition(offsetsSegment, from); + this.end = calculateEndPosition(offsetsSegment, to); + } + } + + private long calculateStartPosition(MemorySegment offsetsSegment, D from) { + if (from == null) { + return getFirstOffset(offsetsSegment); + } + long lowerBoundOffset = extractor.findLowerBoundValueOffset(from, storageSegment, offsetsSegment); + if (lowerBoundOffset == -1) { + // the smallest element and doesn't exist + return getFirstOffset(offsetsSegment); + } else { + // storage[lowerBoundOffset] <= from, we need >= only + return 
moveOffsetIfFirstKeyAreNotEqual(from, lowerBoundOffset); + } + } + + private long calculateEndPosition(MemorySegment offsetsSegment, D to) { + if (to == null) { + return getEndOffset(); + } + long lowerBoundOffset = extractor.findLowerBoundValueOffset(to, storageSegment, offsetsSegment); + if (lowerBoundOffset == -1) { + // the smallest element and doesn't exist + return getFirstOffset(offsetsSegment); + } + // storage[lowerBoundOffset] <= to, we need >= only + return moveOffsetIfFirstKeyAreNotEqual(to, lowerBoundOffset); + } + + private long getFirstOffset(MemorySegment offsetsSegment) { + return offsetsSegment.getAtIndex(LONG_LAYOUT, 0); + } + + private long getEndOffset() { + return storageSegment.byteSize(); + } + + private long moveOffsetIfFirstKeyAreNotEqual(D from, long lowerBoundOffset) { + long offset = lowerBoundOffset; + D lowerBoundKey = extractor.readValue(storageSegment, offset); + if (extractor.compare(lowerBoundKey, from) != 0) { + offset += extractor.size(lowerBoundKey); + D lowerBoundValue = extractor.readValue(storageSegment, offset); + offset += extractor.size(lowerBoundValue); + } + return offset; + } + + @Override + public boolean hasNext() { + return start < end; + } + + @Override + public E next() { + E entry = extractor.readEntry(storageSegment, start); + start += extractor.size(entry); + return entry; + } + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/EntryExtractor.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/EntryExtractor.java new file mode 100644 index 000000000..3893342ea --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/EntryExtractor.java @@ -0,0 +1,23 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +import ru.vk.itmo.Entry; +import java.lang.foreign.MemorySegment; +import java.util.Comparator; + +public interface EntryExtractor> extends Comparator { + D readValue(MemorySegment memorySegment, long offset); + + long writeValue(D value, MemorySegment memorySegment, long offset); + + E readEntry(MemorySegment memorySegment, long offset); + + long writeEntry(E entry, MemorySegment memorySegment, long offset); + + long findLowerBoundValueOffset(D key, MemorySegment storage, MemorySegment offsets); + + long size(D value); + + long size(E entry); + + E createEntry(D key, D value); +} From ac821d4eddda172cded85d540876ffbe45235671 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 18 Oct 2023 23:59:00 +0300 Subject: [PATCH 17/36] codeclimate please --- .../ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java index 91eea3ba3..47572a8ac 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/DaoIterator.java @@ -39,11 +39,11 @@ private List getStorageIterators(D from, D to, List storageSegments, List offsetsSegments) { int storagesCount = storageSegments.size(); - final List storageIterators = new ArrayList<>(storagesCount); + final List iterators = new ArrayList<>(storagesCount); for (int i = 0; i < storagesCount; i++) { - storageIterators.add(new StorageIterator(storageSegments.get(i), offsetsSegments.get(i), from, to)); + iterators.add(new StorageIterator(storageSegments.get(i), offsetsSegments.get(i), from, to)); } - return storageIterators; + return iterators; } @Override From 
a5a0281281b4699245656fe9b0f04c10fc458cf3 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 1 Nov 2023 22:04:31 +0300 Subject: [PATCH 18/36] review fixes --- .../AbstractBasedOnSSTableDao.java | 20 ++---------- .../MemorySegmentEntryExtractor.java | 31 ++++++++++++------- 2 files changed, 22 insertions(+), 29 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index ca7033c0d..571eba8be 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -42,8 +42,6 @@ public abstract class AbstractBasedOnSSTableDao> extends A // =================================== private final int storagesCount; - private final List dbFileChannels; - private final List offsetChannels; private final List dbMappedSegments; private final List offsetMappedSegments; @@ -58,14 +56,12 @@ protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extracto this.metadataPath = basePath.resolve(METADATA_FILENAME); this.storagesCount = getCountFromMetadataOrCreate(); - this.dbFileChannels = new ArrayList<>(storagesCount); - this.offsetChannels = new ArrayList<>(storagesCount); this.dbMappedSegments = new ArrayList<>(storagesCount); this.offsetMappedSegments = new ArrayList<>(storagesCount); for (int i = 0; i < storagesCount; i++) { - readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbFileChannels, dbMappedSegments); - readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetChannels, offsetMappedSegments); + readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbMappedSegments); + readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetMappedSegments); } } @@ -81,13 +77,11 @@ private int getCountFromMetadataOrCreate() throws IOException { } private void readFileAndMapToSegment(String filenamePrefix, int index, - List channels, List segments) throws IOException { Path path = basePath.resolve(filenamePrefix + index); try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { MemorySegment segment = channel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); - channels.add(channel); segments.add(segment); } } @@ -191,15 +185,5 @@ public synchronized void close() throws IOException { arena.close(); } flush(); - closeChannels(dbFileChannels); - closeChannels(offsetChannels); - } - - private void closeChannels(List channels) throws IOException { - for (FileChannel channel : channels) { - if (channel.isOpen()) { - channel.close(); - } - } } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java index 7d9df9968..59296f7c4 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java @@ -7,16 +7,17 @@ import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; -public class MemorySegmentEntryExtractor implements EntryExtractor> { +public final class MemorySegmentEntryExtractor implements EntryExtractor> { public static final MemorySegmentEntryExtractor INSTANCE = new MemorySegmentEntryExtractor(); - private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; - private static final ValueLayout.OfByte BYTE_LAYOUT = ValueLayout.JAVA_BYTE; - private static final int SIZE_LENGTH = Long.BYTES; + 
private static final long SIZE_LENGTH = ValueLayout.JAVA_BYTE.byteSize(); private static final long VALUE_IS_NULL_SIZE = -1; + private MemorySegmentEntryExtractor() { + } + @Override public MemorySegment readValue(MemorySegment memorySegment, long offset) { - long size = memorySegment.get(LONG_LAYOUT, offset); + long size = memorySegment.get(ValueLayout.JAVA_LONG_UNALIGNED, offset); if (size == VALUE_IS_NULL_SIZE) { return null; } @@ -26,11 +27,11 @@ public MemorySegment readValue(MemorySegment memorySegment, long offset) { @Override public long writeValue(MemorySegment valueSegment, MemorySegment memorySegment, long offset) { if (valueSegment == null) { - memorySegment.set(LONG_LAYOUT, offset, VALUE_IS_NULL_SIZE); + memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, VALUE_IS_NULL_SIZE); return offset + SIZE_LENGTH; } long size = valueSegment.byteSize(); - memorySegment.set(LONG_LAYOUT, offset, size); + memorySegment.set(ValueLayout.JAVA_LONG_UNALIGNED, offset, size); MemorySegment.copy(valueSegment, 0, memorySegment, offset + SIZE_LENGTH, size); return offset + SIZE_LENGTH + size; } @@ -57,7 +58,7 @@ public long findLowerBoundValueOffset(MemorySegment key, MemorySegment storage, while (left + 1 < right) { long middle = left + (right - left) / 2; - long middleOffset = offsets.getAtIndex(LONG_LAYOUT, middle); + long middleOffset = offsets.getAtIndex(ValueLayout.JAVA_LONG_UNALIGNED, middle); MemorySegment middleKey = readValue(storage, middleOffset); if (compare(middleKey, key) <= 0) { @@ -66,7 +67,7 @@ public long findLowerBoundValueOffset(MemorySegment key, MemorySegment storage, right = middle; } } - return left == -1 ? -1 : offsets.getAtIndex(LONG_LAYOUT, left); + return left == -1 ? -1 : offsets.getAtIndex(ValueLayout.JAVA_LONG_UNALIGNED, left); } @Override @@ -89,6 +90,14 @@ public Entry createEntry(MemorySegment key, MemorySegment value) @Override public int compare(MemorySegment a, MemorySegment b) { + if (a == null && b == null) { + return 0; + } else if (a == null) { + return 1; + } else if (b == null) { + return -1; + } + long diffIndex = a.mismatch(b); if (diffIndex == -1) { return 0; @@ -98,8 +107,8 @@ public int compare(MemorySegment a, MemorySegment b) { return 1; } - byte byteA = a.getAtIndex(BYTE_LAYOUT, diffIndex); - byte byteB = b.getAtIndex(BYTE_LAYOUT, diffIndex); + byte byteA = a.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); + byte byteB = b.getAtIndex(ValueLayout.JAVA_BYTE, diffIndex); return Byte.compare(byteA, byteB); } } From 2600814ddf2811c1019fcae671fb02406be54428 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 1 Nov 2023 22:08:59 +0300 Subject: [PATCH 19/36] hotfix --- .../vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java index 59296f7c4..f55e75fdb 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentEntryExtractor.java @@ -9,7 +9,7 @@ public final class MemorySegmentEntryExtractor implements EntryExtractor> { public static final MemorySegmentEntryExtractor INSTANCE = new MemorySegmentEntryExtractor(); - private static final long SIZE_LENGTH = ValueLayout.JAVA_BYTE.byteSize(); + private static final long SIZE_LENGTH = ValueLayout.JAVA_LONG_UNALIGNED.byteSize(); private static final long VALUE_IS_NULL_SIZE = -1; 
private MemorySegmentEntryExtractor() { From bb8c62b1e7d8c08ce046b5411ac3c3ce957919d9 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 1 Nov 2023 23:51:44 +0300 Subject: [PATCH 20/36] initial commit --- .../AbstractBasedOnSSTableDao.java | 127 +++++++++++++----- .../MemorySegmentDaoFactory.java | 2 +- 2 files changed, 94 insertions(+), 35 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 571eba8be..7f4fee714 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -5,6 +5,7 @@ import ru.vk.itmo.kovalchukvladislav.model.DaoIterator; import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; +import java.io.File; import java.io.IOException; import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; @@ -15,9 +16,11 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; +import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; public abstract class AbstractBasedOnSSTableDao> extends AbstractInMemoryDao { // =================================== @@ -25,7 +28,6 @@ public abstract class AbstractBasedOnSSTableDao> extends A // =================================== private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; - private static final String METADATA_FILENAME = "metadata"; private static final String DB_FILENAME_PREFIX = "db_"; // =================================== @@ -33,7 +35,6 @@ public abstract class AbstractBasedOnSSTableDao> extends A // =================================== private final Path basePath; - private final Path metadataPath; private final Arena arena = Arena.ofShared(); private final EntryExtractor extractor; @@ -41,7 +42,7 @@ public abstract class AbstractBasedOnSSTableDao> extends A // Storages // =================================== - private final int storagesCount; + private final AtomicInteger storagesCount = new AtomicInteger(0); private final List dbMappedSegments; private final List offsetMappedSegments; @@ -53,39 +54,52 @@ protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extracto if (!Files.exists(basePath)) { Files.createDirectory(basePath); } - this.metadataPath = basePath.resolve(METADATA_FILENAME); - - this.storagesCount = getCountFromMetadataOrCreate(); - this.dbMappedSegments = new ArrayList<>(storagesCount); - this.offsetMappedSegments = new ArrayList<>(storagesCount); - - for (int i = 0; i < storagesCount; i++) { - readFileAndMapToSegment(DB_FILENAME_PREFIX, i, dbMappedSegments); - readFileAndMapToSegment(OFFSETS_FILENAME_PREFIX, i, offsetMappedSegments); - } + this.dbMappedSegments = new ArrayList<>(); + this.offsetMappedSegments = new ArrayList<>(); + readFilesAndMapToSegment(); } // =================================== // Restoring state // =================================== - private int getCountFromMetadataOrCreate() throws IOException { - if (!Files.exists(metadataPath)) { - Files.writeString(metadataPath, "0", StandardOpenOption.WRITE, StandardOpenOption.CREATE); - return 0; + + private void readFilesAndMapToSegment() throws IOException { + File dir = new File(basePath.toString()); + File[] files = dir.listFiles(it -> 
it.getName().startsWith(DB_FILENAME_PREFIX)); + if (files == null) { + return; } - return Integer.parseInt(Files.readString(metadataPath)); - } + Arrays.sort(files); - private void readFileAndMapToSegment(String filenamePrefix, int index, - List segments) throws IOException { - Path path = basePath.resolve(filenamePrefix + index); - try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { + for (File file : files) { + String name = file.getName(); + int index = name.indexOf(DB_FILENAME_PREFIX); + if (index == -1) { + continue; + } + String timestamp = name.substring(index + DB_FILENAME_PREFIX.length()); + readFileAndMapToSegment(timestamp); + } + } - MemorySegment segment = channel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(path), arena); - segments.add(segment); + private void readFileAndMapToSegment(String timestamp) throws IOException { + Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + timestamp); + Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + timestamp); + if (!Files.exists(dbPath) || !Files.exists(offsetsPath)) { + return; + } + try (FileChannel dbChannel = FileChannel.open(dbPath, StandardOpenOption.READ); + FileChannel offsetChannel = FileChannel.open(offsetsPath, StandardOpenOption.READ)) { + + MemorySegment db = dbChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(dbPath), arena); + MemorySegment offsets = offsetChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(offsetsPath), arena); + dbMappedSegments.add(db); + offsetMappedSegments.add(offsets); + storagesCount.incrementAndGet(); } } + // =================================== // Finding in storage // =================================== @@ -106,7 +120,7 @@ public E get(D key) { } private E findInStorages(D key) { - for (int i = storagesCount - 1; i >= 0; i--) { + for (int i = storagesCount.get() - 1; i >= 0; i--) { MemorySegment storage = dbMappedSegments.get(i); MemorySegment offsets = offsetMappedSegments.get(i); @@ -128,7 +142,25 @@ private E findInStorages(D key) { // =================================== // Writing data // =================================== - private void writeData() throws IOException { + + private void writeMemoryDAO() throws IOException { + writeData(dao.values().iterator(), dao.size(), getDAOBytesSize()); + } + + private void writeMemoryAndStorageDAO() throws IOException { + Iterator allIterator = all(); + long entriesCount = 0; + long daoSize = 0; + + while (allIterator.hasNext()) { + E next = allIterator.next(); + entriesCount++; + daoSize += extractor.size(next); + } + writeData(all(), entriesCount, daoSize); + } + + private void writeData(Iterator daoIterator, long entriesCount, long daoSize) throws IOException { Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); @@ -143,14 +175,14 @@ private void writeData() throws IOException { FileChannel offsets = FileChannel.open(offsetsPath, options); Arena confinedArena = Arena.ofConfined()) { - long dbSize = getDAOBytesSize(); - long offsetsSize = (long) dao.size() * Long.BYTES; - MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, dbSize, confinedArena); + long offsetsSize = entriesCount * Long.BYTES; + MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, daoSize, confinedArena); MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, confinedArena); int i = 0; long offset = 0; - for (E entry : dao.values()) { + while 
(daoIterator.hasNext()) { + E entry = daoIterator.next(); offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); i += 1; offset = extractor.writeEntry(entry, fileSegment, offset); @@ -171,19 +203,46 @@ private long getDAOBytesSize() { // =================================== // Flush and close // =================================== + private void clear() { + dao.clear(); + dbMappedSegments.clear(); + offsetMappedSegments.clear(); + storagesCount.set(0); + } + @Override public synchronized void flush() throws IOException { if (!dao.isEmpty()) { - writeData(); - Files.writeString(metadataPath, String.valueOf(storagesCount + 1)); + writeMemoryDAO(); } } @Override public synchronized void close() throws IOException { + compact(); if (arena.scope().isAlive()) { arena.close(); } - flush(); + } + + @Override + public void compact() throws IOException { + writeMemoryAndStorageDAO(); + clear(); + deleteFilesExceptLatest(DB_FILENAME_PREFIX); + deleteFilesExceptLatest(OFFSETS_FILENAME_PREFIX); + readFilesAndMapToSegment(); + } + + private void deleteFilesExceptLatest(String prefix) throws IOException { + File dir = new File(basePath.toString()); + File[] files = dir.listFiles(it -> it.getName().startsWith(prefix)); + if (files == null) { + return; + } + Arrays.sort(files); + for (int i = 0; i + 1 < files.length; i++) { + Files.delete(files[i].toPath()); + } } } diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index c96427e2f..a0d08eb31 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,7 +12,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory(stage = 3) +@DaoFactory(stage = 4) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; From a3febfcb9e2679b92aa7bf62b4d6a79ab54fa1ff Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 1 Nov 2023 23:58:00 +0300 Subject: [PATCH 21/36] cringe test --- .../AbstractBasedOnSSTableDao.java | 87 ++++++++++++------- 1 file changed, 56 insertions(+), 31 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 7f4fee714..37aeae618 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; public abstract class AbstractBasedOnSSTableDao> extends AbstractInMemoryDao { // =================================== @@ -42,9 +42,11 @@ public abstract class AbstractBasedOnSSTableDao> extends A // Storages // =================================== - private final AtomicInteger storagesCount = new AtomicInteger(0); + private int storagesCount = 0; + private volatile boolean closed = false; private final List dbMappedSegments; private final List offsetMappedSegments; + private final Logger logger = Logger.getLogger(getClass().getSimpleName()); protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extractor) throws 
IOException { super(extractor); @@ -56,14 +58,15 @@ protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extracto } this.dbMappedSegments = new ArrayList<>(); this.offsetMappedSegments = new ArrayList<>(); - readFilesAndMapToSegment(); + reloadFilesAndMapToSegment(); } // =================================== // Restoring state // =================================== - private void readFilesAndMapToSegment() throws IOException { + private void reloadFilesAndMapToSegment() throws IOException { + logger.info(() -> "Reloading files"); File dir = new File(basePath.toString()); File[] files = dir.listFiles(it -> it.getName().startsWith(DB_FILENAME_PREFIX)); if (files == null) { @@ -80,6 +83,7 @@ private void readFilesAndMapToSegment() throws IOException { String timestamp = name.substring(index + DB_FILENAME_PREFIX.length()); readFileAndMapToSegment(timestamp); } + logger.info(() -> String.format("Reloaded %d files", storagesCount)); } private void readFileAndMapToSegment(String timestamp) throws IOException { @@ -88,6 +92,9 @@ private void readFileAndMapToSegment(String timestamp) throws IOException { if (!Files.exists(dbPath) || !Files.exists(offsetsPath)) { return; } + + logger.info(() -> String.format("Reading files with timestamp %s", timestamp)); + try (FileChannel dbChannel = FileChannel.open(dbPath, StandardOpenOption.READ); FileChannel offsetChannel = FileChannel.open(offsetsPath, StandardOpenOption.READ)) { @@ -95,8 +102,10 @@ private void readFileAndMapToSegment(String timestamp) throws IOException { MemorySegment offsets = offsetChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(offsetsPath), arena); dbMappedSegments.add(db); offsetMappedSegments.add(offsets); - storagesCount.incrementAndGet(); + storagesCount++; } + + logger.info(() -> String.format("Successfully read files with %s timestamp", timestamp)); } @@ -120,7 +129,7 @@ public E get(D key) { } private E findInStorages(D key) { - for (int i = storagesCount.get() - 1; i >= 0; i--) { + for (int i = storagesCount - 1; i >= 0; i--) { MemorySegment storage = dbMappedSegments.get(i); MemorySegment offsets = offsetMappedSegments.get(i); @@ -143,11 +152,11 @@ private E findInStorages(D key) { // Writing data // =================================== - private void writeMemoryDAO() throws IOException { - writeData(dao.values().iterator(), dao.size(), getDAOBytesSize()); + private long writeMemoryDAO() throws IOException { + return writeData(dao.values().iterator(), dao.size(), getDAOBytesSize()); } - private void writeMemoryAndStorageDAO() throws IOException { + private long writeMemoryAndStorageDAO() throws IOException { Iterator allIterator = all(); long entriesCount = 0; long daoSize = 0; @@ -157,12 +166,18 @@ private void writeMemoryAndStorageDAO() throws IOException { entriesCount++; daoSize += extractor.size(next); } - writeData(all(), entriesCount, daoSize); + return writeData(all(), entriesCount, daoSize); } - private void writeData(Iterator daoIterator, long entriesCount, long daoSize) throws IOException { - Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + storagesCount); - Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + storagesCount); + /** + * Returns created files timestamp. 
+ */ + private long writeData(Iterator daoIterator, long entriesCount, long daoSize) throws IOException { + long timestamp = System.currentTimeMillis(); + Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + timestamp); + Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + timestamp); + + logger.info(() -> String.format("Writing files with %s timestamp", timestamp)); OpenOption[] options = new OpenOption[] { StandardOpenOption.READ, @@ -190,6 +205,11 @@ private void writeData(Iterator daoIterator, long entriesCount, long daoSize) fileSegment.load(); offsetsSegment.load(); } + + logger.info(() -> String.format("Successfully writing with %s timestamp. Entries count %d, daoSize %d", + timestamp, entriesCount, daoSize)); + + return timestamp; } private long getDAOBytesSize() { @@ -203,46 +223,51 @@ private long getDAOBytesSize() { // =================================== // Flush and close // =================================== - private void clear() { - dao.clear(); - dbMappedSegments.clear(); - offsetMappedSegments.clear(); - storagesCount.set(0); - } @Override public synchronized void flush() throws IOException { if (!dao.isEmpty()) { - writeMemoryDAO(); + long flushedFileName = writeMemoryDAO(); + logger.info(() -> String.format("Flushed timestamp is %d", flushedFileName)); } } @Override public synchronized void close() throws IOException { - compact(); + if (closed) { + return; + } + flush(); if (arena.scope().isAlive()) { arena.close(); } + closed = true; } @Override - public void compact() throws IOException { - writeMemoryAndStorageDAO(); - clear(); - deleteFilesExceptLatest(DB_FILENAME_PREFIX); - deleteFilesExceptLatest(OFFSETS_FILENAME_PREFIX); - readFilesAndMapToSegment(); + public synchronized void compact() throws IOException { + long createdFileTimestamp = writeMemoryAndStorageDAO(); + logger.info(() -> String.format("Compacted timestamp is %d", createdFileTimestamp)); + deleteFilesExceptWithTimeStamp(String.valueOf(createdFileTimestamp)); } - private void deleteFilesExceptLatest(String prefix) throws IOException { + private void deleteFilesExceptWithTimeStamp(String excludedTimeStamp) { File dir = new File(basePath.toString()); - File[] files = dir.listFiles(it -> it.getName().startsWith(prefix)); + + File[] files = dir.listFiles(it -> shouldDelete(it.getName(), excludedTimeStamp)); if (files == null) { + logger.info(() -> String.format("Not found files to delete, excludedTimeStamp %s", excludedTimeStamp)); return; } Arrays.sort(files); - for (int i = 0; i + 1 < files.length; i++) { - Files.delete(files[i].toPath()); + for (File file : files) { + boolean deleted = file.delete(); + logger.info(() -> String.format("Delete %s file: %s", file.getName(), deleted)); } } + + private boolean shouldDelete(String fileName, String excludedTimeStamp) { + return (fileName.startsWith(DB_FILENAME_PREFIX) || fileName.startsWith(OFFSETS_FILENAME_PREFIX)) + && !fileName.endsWith(excludedTimeStamp); + } } From f8df75c30feb3acfa00bea4bb882802f6b935884 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 1 Nov 2023 23:59:00 +0300 Subject: [PATCH 22/36] codeclimate fix --- .../AbstractBasedOnSSTableDao.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 37aeae618..20304e0e6 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ 
b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -42,14 +42,16 @@ public abstract class AbstractBasedOnSSTableDao> extends A // Storages // =================================== - private int storagesCount = 0; - private volatile boolean closed = false; + private int storagesCount; + private volatile boolean closed; private final List dbMappedSegments; private final List offsetMappedSegments; private final Logger logger = Logger.getLogger(getClass().getSimpleName()); protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extractor) throws IOException { super(extractor); + this.closed = false; + this.storagesCount = 0; this.extractor = extractor; this.basePath = Objects.requireNonNull(config.basePath()); @@ -251,7 +253,7 @@ public synchronized void compact() throws IOException { deleteFilesExceptWithTimeStamp(String.valueOf(createdFileTimestamp)); } - private void deleteFilesExceptWithTimeStamp(String excludedTimeStamp) { + private void deleteFilesExceptWithTimeStamp(String excludedTimeStamp) throws IOException { File dir = new File(basePath.toString()); File[] files = dir.listFiles(it -> shouldDelete(it.getName(), excludedTimeStamp)); @@ -261,8 +263,8 @@ private void deleteFilesExceptWithTimeStamp(String excludedTimeStamp) { } Arrays.sort(files); for (File file : files) { - boolean deleted = file.delete(); - logger.info(() -> String.format("Delete %s file: %s", file.getName(), deleted)); + Files.delete(file.toPath()); + logger.info(() -> String.format("Delete %s file", file.getName())); } } From 2b93168d5baa4e0ba2dbc4fa5a108431fc83d71d Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 22 Nov 2023 04:35:55 +0300 Subject: [PATCH 23/36] remove serializer --- .../MemorySegmentSerializer.java | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java deleted file mode 100644 index bb947ebed..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/MemorySegmentSerializer.java +++ /dev/null @@ -1,14 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav; - -import ru.vk.itmo.Entry; -import java.lang.foreign.MemorySegment; - -public interface MemorySegmentSerializer> { - D toValue(MemorySegment input); - - MemorySegment fromValue(D value); - - long size(D value); - - E createEntry(D key, D value); -} From 18c1b5775601d2649286a76af70f768cde562b49 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 22 Nov 2023 12:59:00 +0300 Subject: [PATCH 24/36] revert 2 --- .../itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index a0d08eb31..7867c9fd4 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,7 +12,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory(stage = 4) +@DaoFactory(stage = 5) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; From 
3fc588fbcf3954ede1ea0f5549fd48ac4ba3ecee Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 23 Nov 2023 07:54:46 +0300 Subject: [PATCH 25/36] review fixes --- .../AbstractBasedOnSSTableDao.java | 156 ++++++----------- .../SSTableMemorySegmentWriter.java | 164 ++++++++++++++++++ .../kovalchukvladislav/model/TableInfo.java | 27 +++ 3 files changed, 243 insertions(+), 104 deletions(-) create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/TableInfo.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 20304e0e6..8d2fec029 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -4,29 +4,29 @@ import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.model.DaoIterator; import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; +import ru.vk.itmo.kovalchukvladislav.model.TableInfo; -import java.io.File; import java.io.IOException; import java.lang.foreign.Arena; import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; -import java.nio.file.OpenOption; import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; -import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Objects; +import java.util.logging.Level; import java.util.logging.Logger; public abstract class AbstractBasedOnSSTableDao> extends AbstractInMemoryDao { // =================================== // Constants // =================================== - private static final ValueLayout.OfLong LONG_LAYOUT = ValueLayout.JAVA_LONG_UNALIGNED; + private static final String METADATA_FILENAME = "metadata"; private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; private static final String DB_FILENAME_PREFIX = "db_"; @@ -37,6 +37,7 @@ public abstract class AbstractBasedOnSSTableDao> extends A private final Path basePath; private final Arena arena = Arena.ofShared(); private final EntryExtractor extractor; + private final SSTableMemorySegmentWriter writer; // =================================== // Storages @@ -54,13 +55,12 @@ protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extracto this.storagesCount = 0; this.extractor = extractor; this.basePath = Objects.requireNonNull(config.basePath()); - - if (!Files.exists(basePath)) { - Files.createDirectory(basePath); - } this.dbMappedSegments = new ArrayList<>(); this.offsetMappedSegments = new ArrayList<>(); reloadFilesAndMapToSegment(); + this.writer = new SSTableMemorySegmentWriter<>(basePath, DB_FILENAME_PREFIX, OFFSETS_FILENAME_PREFIX, + METADATA_FILENAME, extractor); + logger.setLevel(Level.OFF); // чтобы не засорять вывод в гитхабе, если такое возможно } // =================================== @@ -68,22 +68,13 @@ protected AbstractBasedOnSSTableDao(Config config, EntryExtractor extracto // =================================== private void reloadFilesAndMapToSegment() throws IOException { - logger.info(() -> "Reloading files"); - File dir = new File(basePath.toString()); - File[] files = dir.listFiles(it -> it.getName().startsWith(DB_FILENAME_PREFIX)); - if (files == null) { - return; + if 
(!Files.exists(basePath)) { + Files.createDirectory(basePath); } - Arrays.sort(files); - - for (File file : files) { - String name = file.getName(); - int index = name.indexOf(DB_FILENAME_PREFIX); - if (index == -1) { - continue; - } - String timestamp = name.substring(index + DB_FILENAME_PREFIX.length()); - readFileAndMapToSegment(timestamp); + logger.info(() -> String.format("Reloading files from %s", basePath)); + List ssTableIds = getSSTableIds(); + for (String ssTableId : ssTableIds) { + readFileAndMapToSegment(ssTableId); } logger.info(() -> String.format("Reloaded %d files", storagesCount)); } @@ -92,6 +83,7 @@ private void readFileAndMapToSegment(String timestamp) throws IOException { Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + timestamp); Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + timestamp); if (!Files.exists(dbPath) || !Files.exists(offsetsPath)) { + logger.severe(() -> String.format("File under path %s or %s doesn't exists", dbPath, offsetsPath)); return; } @@ -106,10 +98,29 @@ private void readFileAndMapToSegment(String timestamp) throws IOException { offsetMappedSegments.add(offsets); storagesCount++; } - logger.info(() -> String.format("Successfully read files with %s timestamp", timestamp)); } + private List getSSTableIds() throws IOException { + Path metadataPath = basePath.resolve(METADATA_FILENAME); + if (!Files.exists(metadataPath)) { + return Collections.emptyList(); + } + return Files.readAllLines(metadataPath, StandardCharsets.UTF_8); + } + + private Path[] getAllTablesPath() throws IOException { + List ssTableIds = getSSTableIds(); + int size = ssTableIds.size(); + Path[] files = new Path[2 * size]; + + for (int i = 0; i < size; i++) { + String id = ssTableIds.get(i); + files[2 * i] = basePath.resolve(DB_FILENAME_PREFIX + id); + files[2 * i + 1] = basePath.resolve(OFFSETS_FILENAME_PREFIX + id); + } + return files; + } // =================================== // Finding in storage @@ -151,14 +162,18 @@ private E findInStorages(D key) { } // =================================== - // Writing data + // Some utils // =================================== - private long writeMemoryDAO() throws IOException { - return writeData(dao.values().iterator(), dao.size(), getDAOBytesSize()); + private TableInfo getInMemoryDaoSizeInfo() { + long size = 0; + for (E entry : dao.values()) { + size += extractor.size(entry); + } + return new TableInfo(dao.size(), size); } - private long writeMemoryAndStorageDAO() throws IOException { + private TableInfo getSSTableDaoSizeInfo() { Iterator allIterator = all(); long entriesCount = 0; long daoSize = 0; @@ -168,58 +183,8 @@ private long writeMemoryAndStorageDAO() throws IOException { entriesCount++; daoSize += extractor.size(next); } - return writeData(all(), entriesCount, daoSize); - } - - /** - * Returns created files timestamp. 
- */ - private long writeData(Iterator daoIterator, long entriesCount, long daoSize) throws IOException { - long timestamp = System.currentTimeMillis(); - Path dbPath = basePath.resolve(DB_FILENAME_PREFIX + timestamp); - Path offsetsPath = basePath.resolve(OFFSETS_FILENAME_PREFIX + timestamp); - - logger.info(() -> String.format("Writing files with %s timestamp", timestamp)); - - OpenOption[] options = new OpenOption[] { - StandardOpenOption.READ, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.CREATE - }; - - try (FileChannel db = FileChannel.open(dbPath, options); - FileChannel offsets = FileChannel.open(offsetsPath, options); - Arena confinedArena = Arena.ofConfined()) { - - long offsetsSize = entriesCount * Long.BYTES; - MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, daoSize, confinedArena); - MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, confinedArena); - - int i = 0; - long offset = 0; - while (daoIterator.hasNext()) { - E entry = daoIterator.next(); - offsetsSegment.setAtIndex(LONG_LAYOUT, i, offset); - i += 1; - offset = extractor.writeEntry(entry, fileSegment, offset); - } - fileSegment.load(); - offsetsSegment.load(); - } - logger.info(() -> String.format("Successfully writing with %s timestamp. Entries count %d, daoSize %d", - timestamp, entriesCount, daoSize)); - - return timestamp; - } - - private long getDAOBytesSize() { - long size = 0; - for (E entry : dao.values()) { - size += extractor.size(entry); - } - return size; + return new TableInfo(entriesCount, daoSize); } // =================================== @@ -228,10 +193,10 @@ private long getDAOBytesSize() { @Override public synchronized void flush() throws IOException { - if (!dao.isEmpty()) { - long flushedFileName = writeMemoryDAO(); - logger.info(() -> String.format("Flushed timestamp is %d", flushedFileName)); + if (dao.isEmpty()) { + return; } + writer.flush(dao.values().iterator(), getInMemoryDaoSizeInfo()); } @Override @@ -248,28 +213,11 @@ public synchronized void close() throws IOException { @Override public synchronized void compact() throws IOException { - long createdFileTimestamp = writeMemoryAndStorageDAO(); - logger.info(() -> String.format("Compacted timestamp is %d", createdFileTimestamp)); - deleteFilesExceptWithTimeStamp(String.valueOf(createdFileTimestamp)); - } - - private void deleteFilesExceptWithTimeStamp(String excludedTimeStamp) throws IOException { - File dir = new File(basePath.toString()); - - File[] files = dir.listFiles(it -> shouldDelete(it.getName(), excludedTimeStamp)); - if (files == null) { - logger.info(() -> String.format("Not found files to delete, excludedTimeStamp %s", excludedTimeStamp)); + if (storagesCount <= 1 && dao.isEmpty()) { return; } - Arrays.sort(files); - for (File file : files) { - Files.delete(file.toPath()); - logger.info(() -> String.format("Delete %s file", file.getName())); - } - } - - private boolean shouldDelete(String fileName, String excludedTimeStamp) { - return (fileName.startsWith(DB_FILENAME_PREFIX) || fileName.startsWith(OFFSETS_FILENAME_PREFIX)) - && !fileName.endsWith(excludedTimeStamp); + Path[] oldTables = getAllTablesPath(); + writer.compact(all(), getSSTableDaoSizeInfo()); + writer.deleteUnusedFiles(oldTables); } } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java new file mode 100644 index 000000000..077a90c75 --- /dev/null 
+++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java @@ -0,0 +1,164 @@ +package ru.vk.itmo.kovalchukvladislav; + +import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; +import ru.vk.itmo.kovalchukvladislav.model.TableInfo; + +import java.io.IOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.lang.foreign.ValueLayout; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.nio.file.StandardOpenOption; +import java.util.Comparator; +import java.util.Iterator; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Stream; + +public class SSTableMemorySegmentWriter> { + private static final Logger logger = Logger.getLogger(SSTableMemorySegmentWriter.class.getSimpleName()); + private static final OpenOption[] WRITE_OPTIONS = new OpenOption[] { + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.TRUNCATE_EXISTING, + StandardOpenOption.CREATE + }; + + private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING + }; + + private final Path basePath; + private final String metadataFilename; + private final String dbFilenamePrefix; + private final String offsetsFilenamePrefix; + private final EntryExtractor extractor; + + public SSTableMemorySegmentWriter(Path basePath, String dbFilenamePrefix, String offsetsFilenamePrefix, + String metadataFilename, EntryExtractor extractor) { + this.basePath = basePath; + this.dbFilenamePrefix = dbFilenamePrefix; + this.offsetsFilenamePrefix = offsetsFilenamePrefix; + this.metadataFilename = metadataFilename; + this.extractor = extractor; + logger.setLevel(Level.OFF); // чтобы не засорять вывод в гитхабе, если такое возможно + } + + public void compact(Iterator iterator, TableInfo info) throws IOException { + Path tempDirectory = Files.createTempDirectory(null); + String timestamp = String.valueOf(System.currentTimeMillis()); + + Path newSSTable = basePath.resolve(dbFilenamePrefix + timestamp); + Path newOffsetsTable = basePath.resolve(offsetsFilenamePrefix + timestamp); + Path tmpSSTable = tempDirectory.resolve(dbFilenamePrefix + timestamp); + Path tmpOffsetsTable = tempDirectory.resolve(offsetsFilenamePrefix + timestamp); + + logger.info(() -> String.format("Compacting started to dir %s, timestamp %s, info %s", + tempDirectory, timestamp, info)); + + try { + writeData(tempDirectory, timestamp, iterator, info); + Path tmpMetadata = addSSTableId(tempDirectory, timestamp); + Path newMetadata = basePath.resolve(metadataFilename); + + Files.move(tmpSSTable, newSSTable, MOVE_OPTIONS); + Files.move(tmpOffsetsTable, newOffsetsTable, MOVE_OPTIONS); + Files.move(tmpMetadata, newMetadata, MOVE_OPTIONS); + } catch (Exception e) { + deleteUnusedFiles(newSSTable, newOffsetsTable); + throw e; + } finally { + deleteUnusedFiles(tempDirectory); + } + logger.info(() -> String.format("Compacted to dir %s, timestamp %s", basePath, timestamp)); + } + + public void flush(Iterator iterator, TableInfo info) throws IOException { + Path tempDirectory = Files.createTempDirectory(null); + String timestamp = String.valueOf(System.currentTimeMillis()); + + Path newSSTable = basePath.resolve(dbFilenamePrefix + timestamp); + Path newOffsetsTable = basePath.resolve(offsetsFilenamePrefix + timestamp); + Path 
tmpSSTable = tempDirectory.resolve(dbFilenamePrefix + timestamp); + Path tmpOffsetsTable = tempDirectory.resolve(offsetsFilenamePrefix + timestamp); + + logger.info(() -> String.format("Flushing started to dir %s, timestamp %s, info %s", + tempDirectory, timestamp, info)); + try { + writeData(tempDirectory, timestamp, iterator, info); + + Files.move(tmpSSTable, newSSTable, MOVE_OPTIONS); + Files.move(tmpOffsetsTable, newOffsetsTable, MOVE_OPTIONS); + addSSTableId(basePath, timestamp); + } catch (Exception e) { + deleteUnusedFiles(newSSTable, newOffsetsTable); + throw e; + } finally { + deleteUnusedFilesInDirectory(tempDirectory); + } + logger.info(() -> String.format("Flushed to dir %s, timestamp %s", basePath, timestamp)); + } + + // Удаление ненужных файлов не является чем то критически важным + // Если произойдет исключение, лучше словить и вывести в лог, чем останавливать работу + public void deleteUnusedFiles(Path... files) { + for (Path file : files) { + try { + boolean deleted = Files.deleteIfExists(file); + if (deleted) { + logger.info(() -> String.format("File %s was deleted", file)); + } else { + logger.severe(() -> String.format("File %s not deleted", file)); + } + } catch (IOException e) { + logger.severe(() -> String.format("Error while deleting file %s: %s", file, e.getMessage())); + } + } + } + + private void deleteUnusedFilesInDirectory(Path directory) { + try (Stream files = Files.walk(directory)) { + Path[] array = files.sorted(Comparator.reverseOrder()).toArray(Path[]::new); + deleteUnusedFiles(array); + } catch (Exception e) { + logger.severe(() -> String.format("Error while deleting directory %s: %s", directory, e.getMessage())); + } + } + + private void writeData(Path path, String timestamp, Iterator daoIterator, TableInfo info) throws IOException { + Path dbPath = path.resolve(dbFilenamePrefix + timestamp); + Path offsetsPath = path.resolve(offsetsFilenamePrefix + timestamp); + + try (FileChannel db = FileChannel.open(dbPath, WRITE_OPTIONS); + FileChannel offsets = FileChannel.open(offsetsPath, WRITE_OPTIONS); + Arena arena = Arena.ofConfined()) { + + long offsetsSize = info.getRecordsCount() * Long.BYTES; + MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, info.getRecordsSize(), arena); + MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, arena); + + int i = 0; + long offset = 0; + while (daoIterator.hasNext()) { + E entry = daoIterator.next(); + offsetsSegment.setAtIndex(ValueLayout.JAVA_LONG_UNALIGNED, i, offset); + offset = extractor.writeEntry(entry, fileSegment, offset); + i += 1; + } + + fileSegment.load(); + offsetsSegment.load(); + } + } + + private Path addSSTableId(Path path, String id) throws IOException { + return Files.writeString(path.resolve(metadataFilename), id + System.lineSeparator(), + StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/TableInfo.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/TableInfo.java new file mode 100644 index 000000000..2331fe563 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/TableInfo.java @@ -0,0 +1,27 @@ +package ru.vk.itmo.kovalchukvladislav.model; + +public class TableInfo { + private final long recordsCount; + private final long recordsSize; + + public TableInfo(long recordsCount, long recordsSize) { + this.recordsCount = recordsCount; + this.recordsSize = recordsSize; + } + + public long getRecordsCount() { + return 
recordsCount; + } + + public long getRecordsSize() { + return recordsSize; + } + + @Override + public String toString() { + return "TableInfo{" + + "recordsCount=" + recordsCount + + ", recordsSize=" + recordsSize + + '}'; + } +} From af8a96f568a353ba1639852e5f16d0790fdc088c Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Wed, 29 Nov 2023 23:58:00 +0300 Subject: [PATCH 26/36] fix --- .../AbstractInMemoryDao.java | 44 ----- .../SSTableMemorySegmentWriter.java | 164 ------------------ .../model/MemoryOverflowException.java | 2 + .../model/SimpleDaoLoggerFactory.java | 2 + .../model/StorageIterator.java | 0 .../storage/InMemoryStorage.java | 2 + .../storage/InMemoryStorageImpl.java | 109 ++++++++++++ .../storage/SSTableStorage.java | 5 + .../storage/SSTableStorageImpl.java | 109 ++++++++++++ .../storage/StorageUtil.java | 2 + 10 files changed, 231 insertions(+), 208 deletions(-) delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java delete mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/MemoryOverflowException.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorage.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorage.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java create mode 100644 src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java deleted file mode 100644 index 3f48b9981..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractInMemoryDao.java +++ /dev/null @@ -1,44 +0,0 @@ -package ru.vk.itmo.kovalchukvladislav; - -import ru.vk.itmo.Dao; -import ru.vk.itmo.Entry; - -import java.util.Comparator; -import java.util.Iterator; -import java.util.concurrent.ConcurrentNavigableMap; -import java.util.concurrent.ConcurrentSkipListMap; - -public abstract class AbstractInMemoryDao> implements Dao { - protected final ConcurrentNavigableMap dao; - protected final Comparator comparator; - - protected AbstractInMemoryDao(Comparator comparator) { - this.dao = new ConcurrentSkipListMap<>(comparator); - this.comparator = comparator; - } - - @Override - public Iterator get(D from, D to) { - ConcurrentNavigableMap subMap; - if (from == null && to == null) { - subMap = dao; - } else if (from == null) { - subMap = dao.headMap(to); - } else if (to == null) { - subMap = dao.tailMap(from); - } else { - subMap = dao.subMap(from, to); - } - return subMap.values().iterator(); - } - - @Override - public E get(D key) { - return dao.get(key); - } - - @Override - public void upsert(E entry) { - dao.put(entry.key(), entry); - } -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java deleted file mode 100644 index 077a90c75..000000000 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/SSTableMemorySegmentWriter.java +++ /dev/null @@ -1,164 +0,0 @@ -package 
ru.vk.itmo.kovalchukvladislav; - -import ru.vk.itmo.Entry; -import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; -import ru.vk.itmo.kovalchukvladislav.model.TableInfo; - -import java.io.IOException; -import java.lang.foreign.Arena; -import java.lang.foreign.MemorySegment; -import java.lang.foreign.ValueLayout; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.OpenOption; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.nio.file.StandardOpenOption; -import java.util.Comparator; -import java.util.Iterator; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Stream; - -public class SSTableMemorySegmentWriter> { - private static final Logger logger = Logger.getLogger(SSTableMemorySegmentWriter.class.getSimpleName()); - private static final OpenOption[] WRITE_OPTIONS = new OpenOption[] { - StandardOpenOption.READ, - StandardOpenOption.WRITE, - StandardOpenOption.TRUNCATE_EXISTING, - StandardOpenOption.CREATE - }; - - private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { - StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING - }; - - private final Path basePath; - private final String metadataFilename; - private final String dbFilenamePrefix; - private final String offsetsFilenamePrefix; - private final EntryExtractor extractor; - - public SSTableMemorySegmentWriter(Path basePath, String dbFilenamePrefix, String offsetsFilenamePrefix, - String metadataFilename, EntryExtractor extractor) { - this.basePath = basePath; - this.dbFilenamePrefix = dbFilenamePrefix; - this.offsetsFilenamePrefix = offsetsFilenamePrefix; - this.metadataFilename = metadataFilename; - this.extractor = extractor; - logger.setLevel(Level.OFF); // чтобы не засорять вывод в гитхабе, если такое возможно - } - - public void compact(Iterator iterator, TableInfo info) throws IOException { - Path tempDirectory = Files.createTempDirectory(null); - String timestamp = String.valueOf(System.currentTimeMillis()); - - Path newSSTable = basePath.resolve(dbFilenamePrefix + timestamp); - Path newOffsetsTable = basePath.resolve(offsetsFilenamePrefix + timestamp); - Path tmpSSTable = tempDirectory.resolve(dbFilenamePrefix + timestamp); - Path tmpOffsetsTable = tempDirectory.resolve(offsetsFilenamePrefix + timestamp); - - logger.info(() -> String.format("Compacting started to dir %s, timestamp %s, info %s", - tempDirectory, timestamp, info)); - - try { - writeData(tempDirectory, timestamp, iterator, info); - Path tmpMetadata = addSSTableId(tempDirectory, timestamp); - Path newMetadata = basePath.resolve(metadataFilename); - - Files.move(tmpSSTable, newSSTable, MOVE_OPTIONS); - Files.move(tmpOffsetsTable, newOffsetsTable, MOVE_OPTIONS); - Files.move(tmpMetadata, newMetadata, MOVE_OPTIONS); - } catch (Exception e) { - deleteUnusedFiles(newSSTable, newOffsetsTable); - throw e; - } finally { - deleteUnusedFiles(tempDirectory); - } - logger.info(() -> String.format("Compacted to dir %s, timestamp %s", basePath, timestamp)); - } - - public void flush(Iterator iterator, TableInfo info) throws IOException { - Path tempDirectory = Files.createTempDirectory(null); - String timestamp = String.valueOf(System.currentTimeMillis()); - - Path newSSTable = basePath.resolve(dbFilenamePrefix + timestamp); - Path newOffsetsTable = basePath.resolve(offsetsFilenamePrefix + timestamp); - Path tmpSSTable = tempDirectory.resolve(dbFilenamePrefix + timestamp); - Path tmpOffsetsTable = 
tempDirectory.resolve(offsetsFilenamePrefix + timestamp); - - logger.info(() -> String.format("Flushing started to dir %s, timestamp %s, info %s", - tempDirectory, timestamp, info)); - try { - writeData(tempDirectory, timestamp, iterator, info); - - Files.move(tmpSSTable, newSSTable, MOVE_OPTIONS); - Files.move(tmpOffsetsTable, newOffsetsTable, MOVE_OPTIONS); - addSSTableId(basePath, timestamp); - } catch (Exception e) { - deleteUnusedFiles(newSSTable, newOffsetsTable); - throw e; - } finally { - deleteUnusedFilesInDirectory(tempDirectory); - } - logger.info(() -> String.format("Flushed to dir %s, timestamp %s", basePath, timestamp)); - } - - // Удаление ненужных файлов не является чем то критически важным - // Если произойдет исключение, лучше словить и вывести в лог, чем останавливать работу - public void deleteUnusedFiles(Path... files) { - for (Path file : files) { - try { - boolean deleted = Files.deleteIfExists(file); - if (deleted) { - logger.info(() -> String.format("File %s was deleted", file)); - } else { - logger.severe(() -> String.format("File %s not deleted", file)); - } - } catch (IOException e) { - logger.severe(() -> String.format("Error while deleting file %s: %s", file, e.getMessage())); - } - } - } - - private void deleteUnusedFilesInDirectory(Path directory) { - try (Stream files = Files.walk(directory)) { - Path[] array = files.sorted(Comparator.reverseOrder()).toArray(Path[]::new); - deleteUnusedFiles(array); - } catch (Exception e) { - logger.severe(() -> String.format("Error while deleting directory %s: %s", directory, e.getMessage())); - } - } - - private void writeData(Path path, String timestamp, Iterator daoIterator, TableInfo info) throws IOException { - Path dbPath = path.resolve(dbFilenamePrefix + timestamp); - Path offsetsPath = path.resolve(offsetsFilenamePrefix + timestamp); - - try (FileChannel db = FileChannel.open(dbPath, WRITE_OPTIONS); - FileChannel offsets = FileChannel.open(offsetsPath, WRITE_OPTIONS); - Arena arena = Arena.ofConfined()) { - - long offsetsSize = info.getRecordsCount() * Long.BYTES; - MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, info.getRecordsSize(), arena); - MemorySegment offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, arena); - - int i = 0; - long offset = 0; - while (daoIterator.hasNext()) { - E entry = daoIterator.next(); - offsetsSegment.setAtIndex(ValueLayout.JAVA_LONG_UNALIGNED, i, offset); - offset = extractor.writeEntry(entry, fileSegment, offset); - i += 1; - } - - fileSegment.load(); - offsetsSegment.load(); - } - } - - private Path addSSTableId(Path path, String id) throws IOException { - return Files.writeString(path.resolve(metadataFilename), id + System.lineSeparator(), - StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE); - } -} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/MemoryOverflowException.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/MemoryOverflowException.java new file mode 100644 index 000000000..81f967f8e --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/MemoryOverflowException.java @@ -0,0 +1,2 @@ +package ru.vk.itmo.kovalchukvladislav.model;public class MemoryOverflowException { +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java new file mode 100644 index 000000000..523cc6eeb --- /dev/null +++ 
b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java @@ -0,0 +1,2 @@ +package ru.vk.itmo.kovalchukvladislav.model;public class Logger { +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java new file mode 100644 index 000000000..e69de29bb diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorage.java new file mode 100644 index 000000000..96dc99099 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorage.java @@ -0,0 +1,2 @@ +package ru.vk.itmo.kovalchukvladislav.storage;public interface InMemoryStorage { +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java new file mode 100644 index 000000000..6ee822b02 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -0,0 +1,109 @@ +package ru.vk.itmo.kovalchukvladislav.storage; + +import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.SSTableMemorySegmentWriter; +import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; +import ru.vk.itmo.kovalchukvladislav.model.TableInfo; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class InMemoryStorage> { + private final long flushThresholdBytes; + private final EntryExtractor extractor; + private final SSTableMemorySegmentWriter writer; + private final ReadWriteLock daoChangeLock = new ReentrantReadWriteLock(); + private final ExecutorService backgroundQueue = Executors.newSingleThreadExecutor(); + + // Следущие три поля меняются одновременно и атомарно с daoChangeLock + private final AtomicLong daoSize = new AtomicLong(0); + private volatile ConcurrentNavigableMap dao; + private volatile ConcurrentNavigableMap flushingDao; + private final EntryExtractor extractor; + private final SSTableMemorySegmentWriter writer; + + public InMemoryStorage(long flushThresholdBytes, + EntryExtractor extractor, + SSTableMemorySegmentWriter writer) { + this.flushThresholdBytes = flushThresholdBytes; + this.extractor = extractor; + this.writer = writer; + this.dao = new ConcurrentSkipListMap<>(extractor); + } + + public E get(D key) { + daoChangeLock.readLock().lock(); + try { + E entry = dao.get(key); + if (entry == null && flushingDao != null) { + entry = flushingDao.get(key); + } + return entry; + } finally { + daoChangeLock.readLock().unlock(); + } + } + + public void upsert(E entry) { + daoChangeLock.readLock().lock(); + try { + E oldEntry = dao.put(entry.key(), entry); + long delta = extractor.size(entry) - extractor.size(oldEntry); + daoSize.addAndGet(delta); + } finally { + daoChangeLock.readLock().unlock(); + } + } + + public List> getIterators(D from, D to) { + daoChangeLock.readLock().lock(); + try { + List> result = new ArrayList<>(2); + if (dao != null) { + result.add(getIteratorDao(dao, from, to)); + } + if (flushingDao != null) { + result.add(getIteratorDao(flushingDao, 
from, to)); + } + return result; + } finally { + daoChangeLock.readLock().unlock(); + } + } + + public void flush() { + daoChangeLock.writeLock().lock(); + try { + this.flushingDao = dao; + long flushingDaoSize = daoSize.getAndSet(0); + this.dao = new ConcurrentSkipListMap<>(extractor); + } finally { + daoChangeLock.writeLock().unlock(); + } + writer.flush(flushingDao.values().iterator(), new TableInfo()); + + } + + private Iterator getIteratorDao(ConcurrentNavigableMap dao, D from, D to) { + ConcurrentNavigableMap subMap; + if (from == null && to == null) { + subMap = dao; + } else if (from == null) { + subMap = dao.headMap(to); + } else if (to == null) { + subMap = dao.tailMap(from); + } else { + subMap = dao.subMap(from, to); + } + return subMap.values().iterator(); + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorage.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorage.java new file mode 100644 index 000000000..5b3ddc419 --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorage.java @@ -0,0 +1,5 @@ +package ru.vk.itmo.kovalchukvladislav.storage; + +public interface SSTableStorage> { +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java new file mode 100644 index 000000000..037978dff --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -0,0 +1,109 @@ +package ru.vk.itmo.kovalchukvladislav.storage; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.lang.foreign.Arena; +import java.lang.foreign.MemorySegment; +import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.logging.Logger; +import ru.vk.itmo.Entry; +import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerFactory; + +public class SSTableStorage> { + private static final Logger logger = SimpleDaoLoggerFactory.createLogger(SSTableStorage.class); + private final Path basePath; + private final String metadataFilename; + private final String dbFilenamePrefix; + private final String offsetsFilenamePrefix; + private final Arena arena = Arena.ofShared(); + private final ReadWriteLock storageChangeLock = new ReentrantReadWriteLock(); + private final ExecutorService backgroundQueue = Executors.newSingleThreadExecutor(); + + // Следущие три поля меняются одновременно и атомарно с storageChangeLock + private final AtomicLong storagesCount = new AtomicLong(0); + private final List dbMappedSegments; + private final List offsetMappedSegments; + + public SSTableStorage(Path basePath, + String metadataFilename, + String dbFilenamePrefix, + String offsetsFilenamePrefix) throws IOException { + this.basePath = basePath; + this.metadataFilename = metadataFilename; + this.dbFilenamePrefix = dbFilenamePrefix; + this.offsetsFilenamePrefix = offsetsFilenamePrefix; + if (!Files.exists(basePath)) { + Files.createDirectory(basePath); + } + } + + private void reloadFilesAndMapToSegment() throws IOException { + logger.info(() -> String.format("Reloading 
files from %s", basePath)); + List ssTableIds = getSSTableIds(); + List + for (String ssTableId : ssTableIds) { + readFileAndMapToSegment(ssTableId); + } + logger.info(() -> String.format("Reloaded %d files", storagesCount)); + } + + private void readFileAndMapToSegment(List dbMappedResult, + List offsetMappedResult, + String timestamp) throws IOException { + Path dbPath = basePath.resolve(dbFilenamePrefix + timestamp); + Path offsetsPath = basePath.resolve(offsetsFilenamePrefix + timestamp); + if (!Files.exists(dbPath) || !Files.exists(offsetsPath)) { + throw new FileNotFoundException("File under path " + dbPath + " or " + offsetsPath + " doesn't exists"); + } + + logger.info(() -> String.format("Reading files with timestamp %s", timestamp)); + + try (FileChannel dbChannel = FileChannel.open(dbPath, StandardOpenOption.READ); + FileChannel offsetChannel = FileChannel.open(offsetsPath, StandardOpenOption.READ)) { + + MemorySegment db = dbChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(dbPath), arena); + MemorySegment offsets = offsetChannel.map(FileChannel.MapMode.READ_ONLY, 0, Files.size(offsetsPath), arena); + dbMappedSegments.add(db); + offsetMappedSegments.add(offsets); + storagesCount++; + } + logger.info(() -> String.format("Successfully read files with %s timestamp", timestamp)); + } + + private List getSSTableIds() throws IOException { + storageChangeLock.readLock().lock(); + try { + Path metadataPath = basePath.resolve(metadataFilename); + if (!Files.exists(metadataPath)) { + return Collections.emptyList(); + } + return Files.readAllLines(metadataPath, StandardCharsets.UTF_8); + } finally { + storageChangeLock.readLock().unlock(); + } + } + + private Path[] getAllTablesPath() throws IOException { + List ssTableIds = getSSTableIds(); + int size = ssTableIds.size(); + Path[] files = new Path[2 * size]; + + for (int i = 0; i < size; i++) { + String id = ssTableIds.get(i); + files[2 * i] = basePath.resolve(dbFilenamePrefix + id); + files[2 * i + 1] = basePath.resolve(offsetsFilenamePrefix + id); + } + return files; + } +} diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java new file mode 100644 index 000000000..9a3ccab9f --- /dev/null +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java @@ -0,0 +1,2 @@ +package ru.vk.itmo.kovalchukvladislav.storage;public class StorageUtil { +} From c529e981f8800913babe04c6fe47524658fbeecb Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Mon, 18 Dec 2023 11:51:20 +0300 Subject: [PATCH 27/36] fix codeclimate --- .../AbstractBasedOnSSTableDao.java | 9 ++++----- ...ctory.java => SimpleDaoLoggerUtility.java} | 5 ++++- .../storage/InMemoryStorageImpl.java | 14 ++++++------- .../storage/SSTableStorageImpl.java | 20 +++++++++---------- .../{StorageUtil.java => StorageUtility.java} | 6 +++++- 5 files changed, 30 insertions(+), 24 deletions(-) rename src/main/java/ru/vk/itmo/kovalchukvladislav/model/{SimpleDaoLoggerFactory.java => SimpleDaoLoggerUtility.java} (84%) rename src/main/java/ru/vk/itmo/kovalchukvladislav/storage/{StorageUtil.java => StorageUtility.java} (98%) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 7063a091f..d32ff1de9 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java
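Editor's note on the metadata convention used by SSTableStorageImpl above: the `metadata` file stores one SSTable id (timestamp) per line, and each id maps to a `db_<id>` data file plus an `offsets_<id>` index file. A self-contained sketch of that convention follows; the class and method names are invented for illustration and are not part of the patch, only the file-name prefixes and the append logic mirror the diff.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Collections;
import java.util.List;

// Hypothetical helper illustrating the metadata-file convention from the patch.
final class SSTableCatalogSketch {
    private final Path basePath;

    SSTableCatalogSketch(Path basePath) {
        this.basePath = basePath;
    }

    // One SSTable id per line; an absent file simply means "no tables yet".
    List<String> readIds() throws IOException {
        Path metadata = basePath.resolve("metadata");
        if (!Files.exists(metadata)) {
            return Collections.emptyList();
        }
        return Files.readAllLines(metadata, StandardCharsets.UTF_8);
    }

    // CREATE + APPEND keeps previously recorded ids intact.
    void appendId(String id) throws IOException {
        Files.writeString(basePath.resolve("metadata"), id + System.lineSeparator(),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.APPEND);
    }

    Path dataFile(String id) {
        return basePath.resolve("db_" + id);
    }

    Path offsetsFile(String id) {
        return basePath.resolve("offsets_" + id);
    }
}

In the flush path shown earlier in this patch series, the id is appended only after both files have been moved into place, which is what lets readers treat the metadata file as the source of truth.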
@@ -5,7 +5,7 @@ import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.model.DaoIterator; import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; -import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerFactory; +import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerUtility; import ru.vk.itmo.kovalchukvladislav.storage.InMemoryStorage; import ru.vk.itmo.kovalchukvladislav.storage.InMemoryStorageImpl; import ru.vk.itmo.kovalchukvladislav.storage.SSTableStorage; @@ -25,7 +25,7 @@ import java.util.logging.Logger; public abstract class AbstractBasedOnSSTableDao> implements Dao { - private final Logger logger = SimpleDaoLoggerFactory.createLogger(getClass()); + private final Logger logger = SimpleDaoLoggerUtility.createLogger(getClass()); private static final String DB_FILENAME_PREFIX = "db_"; private static final String METADATA_FILENAME = "metadata"; private static final String OFFSETS_FILENAME_PREFIX = "offsets_"; @@ -93,14 +93,13 @@ public void flush() { } private void submitFlushAndAddSSTable(Callable flushCallable) { - flushQueue.submit(() -> { + flushQueue.execute(() -> { try { String newTimestamp = flushCallable.call(); ssTableStorage.addSSTableId(newTimestamp, true); inMemoryStorage.completeFlush(); } catch (Exception e) { inMemoryStorage.failFlush(); - throw new RuntimeException(e); } }); } @@ -125,7 +124,7 @@ public void close() { @Override public void compact() { - compactQueue.submit(() -> { + compactQueue.execute(() -> { try { ssTableStorage.compact(); } catch (IOException e) { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java similarity index 84% rename from src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java rename to src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java index 707d0ea07..eaa99d727 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerFactory.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java @@ -4,7 +4,10 @@ import java.util.logging.Logger; // Логгер, который я включаю локально, но выключаю перед пушем, чтобы он не засорял гитхаб. 
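The renamed SimpleDaoLoggerUtility keeps the Russian comment, which translates roughly as: "a logger I enable locally but switch off before pushing so it does not clutter GitHub output." One hedged alternative, not what the patch does, is to drive the level from a system property so no code edit is needed when debugging locally; the property name below is invented.

import java.util.logging.Level;
import java.util.logging.Logger;

// Sketch only: level comes from e.g. -Ddao.log.level=INFO, defaulting to OFF.
final class DaoLoggerSketch {
    private DaoLoggerSketch() {
    }

    static Logger createLogger(Class<?> clazz) {
        Logger logger = Logger.getLogger(clazz.getSimpleName());
        String level = System.getProperty("dao.log.level", "OFF");
        logger.setLevel(Level.parse(level));
        return logger;
    }
}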
-public class SimpleDaoLoggerFactory { +public class SimpleDaoLoggerUtility { + private SimpleDaoLoggerUtility() { + } + public static Logger createLogger(Class clazz) { Logger logger = Logger.getLogger(clazz.getSimpleName()); logger.setLevel(Level.OFF); diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index 02d40fd7b..cec0f4a7c 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -3,7 +3,7 @@ import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; import ru.vk.itmo.kovalchukvladislav.model.MemoryOverflowException; -import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerFactory; +import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerUtility; import ru.vk.itmo.kovalchukvladislav.model.TableInfo; import java.io.IOException; @@ -21,7 +21,7 @@ import java.util.logging.Logger; public class InMemoryStorageImpl> implements InMemoryStorage { - private static final Logger logger = SimpleDaoLoggerFactory.createLogger(InMemoryStorageImpl.class); + private static final Logger logger = SimpleDaoLoggerUtility.createLogger(InMemoryStorageImpl.class); private final long flushThresholdBytes; private final EntryExtractor extractor; @@ -214,18 +214,18 @@ private String flushImpl(Iterator immutableCollectionIterator, TableInfo info Path tmpSSTable = tempDirectory.resolve(dbPrefix + timestamp); Path tmpOffsets = tempDirectory.resolve(offsetsPrefix + timestamp); - StorageUtil.writeData(tmpSSTable, tmpOffsets, immutableCollectionIterator, info, extractor); + StorageUtility.writeData(tmpSSTable, tmpOffsets, immutableCollectionIterator, info, extractor); - newSSTable = Files.move(tmpSSTable, basePath.resolve(dbPrefix + timestamp), StorageUtil.MOVE_OPTIONS); - Files.move(tmpOffsets, basePath.resolve(offsetsPrefix + timestamp), StorageUtil.MOVE_OPTIONS); + newSSTable = Files.move(tmpSSTable, basePath.resolve(dbPrefix + timestamp), StorageUtility.MOVE_OPTIONS); + Files.move(tmpOffsets, basePath.resolve(offsetsPrefix + timestamp), StorageUtility.MOVE_OPTIONS); } catch (Exception e) { // newOffsets чистить не надо. Это последняя операция, если исключение то он точно не перемещен. 
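The trailing comment above translates as: "newOffsets does not need cleanup — it is the last operation, so if an exception is thrown it was definitely not moved." The flushImpl/compactAndAddToMetadata code in these hunks follows a write-to-temp-directory-then-atomic-move pattern; the sketch below isolates that pattern under simplified names. The db_/offsets_ prefixes and the move options are taken from the diff, while writeTo() and the class name are assumptions for illustration.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Illustrative sketch of "write to a temp dir, then publish with an atomic move".
final class AtomicPublishSketch {
    private static final StandardCopyOption[] MOVE_OPTIONS = {
            StandardCopyOption.ATOMIC_MOVE,
            StandardCopyOption.REPLACE_EXISTING
    };

    interface TempWriter {
        void writeTo(Path dataFile, Path offsetsFile) throws IOException;
    }

    static void publish(Path basePath, String timestamp, TempWriter writer) throws IOException {
        Path tempDir = Files.createTempDirectory("flush-");
        Path tmpData = tempDir.resolve("db_" + timestamp);
        Path tmpOffsets = tempDir.resolve("offsets_" + timestamp);

        // Serialize fully into the temp directory first.
        writer.writeTo(tmpData, tmpOffsets);

        // Readers only ever see fully written files: each move is atomic, and if the
        // first move throws, nothing under basePath has changed yet.
        Files.move(tmpData, basePath.resolve("db_" + timestamp), MOVE_OPTIONS);
        Files.move(tmpOffsets, basePath.resolve("offsets_" + timestamp), MOVE_OPTIONS);
    }
}

This sketch, like the patch, assumes the temp directory and basePath sit on the same file system; the cross-file-system caveat is noted a bit further down.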
if (newSSTable != null) { - StorageUtil.deleteUnusedFiles(logger, newSSTable); + StorageUtility.deleteUnusedFiles(logger, newSSTable); } throw e; } finally { - StorageUtil.deleteUnusedFilesInDirectory(logger, tempDirectory); + StorageUtility.deleteUnusedFilesInDirectory(logger, tempDirectory); } logger.info(() -> String.format("Flushed to dir %s, timestamp %s", basePath, timestamp)); return timestamp; diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index b16415702..26fc725d0 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -3,7 +3,7 @@ import ru.vk.itmo.Entry; import ru.vk.itmo.kovalchukvladislav.model.DaoIterator; import ru.vk.itmo.kovalchukvladislav.model.EntryExtractor; -import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerFactory; +import ru.vk.itmo.kovalchukvladislav.model.SimpleDaoLoggerUtility; import ru.vk.itmo.kovalchukvladislav.model.StorageIterator; import ru.vk.itmo.kovalchukvladislav.model.TableInfo; @@ -28,7 +28,7 @@ import java.util.logging.Logger; public class SSTableStorageImpl> implements SSTableStorage { - private static final Logger logger = SimpleDaoLoggerFactory.createLogger(SSTableStorageImpl.class); + private static final Logger logger = SimpleDaoLoggerUtility.createLogger(SSTableStorageImpl.class); private final Path basePath; private final String metadataFilename; private final String dataPrefix; @@ -165,7 +165,7 @@ public List> getIterators(D from, D to) { public void close() { if (arena.scope().isAlive()) { arena.close(); - StorageUtil.deleteUnusedFiles(logger, filesToDelete.toArray(Path[]::new)); + StorageUtility.deleteUnusedFiles(logger, filesToDelete.toArray(Path[]::new)); } } @@ -198,23 +198,23 @@ private void compactAndAddToMetadata() throws IOException { logger.info(() -> String.format("Compacting started to dir %s, timestamp %s, info %s", tempDirectory, timestamp, info)); - StorageUtil.writeData(tmpSSTable, tmpOffsetsTable, iterator, info, extractor); + StorageUtility.writeData(tmpSSTable, tmpOffsetsTable, iterator, info, extractor); Path tmpMetadata = addSSTableId(tempDirectory, timestamp); Path newMetadata = basePath.resolve(metadataFilename); - newSSTable = Files.move(tmpSSTable, basePath.resolve(dataPrefix + timestamp), StorageUtil.MOVE_OPTIONS); + newSSTable = Files.move(tmpSSTable, basePath.resolve(dataPrefix + timestamp), StorageUtility.MOVE_OPTIONS); newOffsetsTable = Files.move(tmpOffsetsTable, basePath.resolve(offsetsPrefix + timestamp), - StorageUtil.MOVE_OPTIONS); - Files.move(tmpMetadata, newMetadata, StorageUtil.MOVE_OPTIONS); + StorageUtility.MOVE_OPTIONS); + Files.move(tmpMetadata, newMetadata, StorageUtility.MOVE_OPTIONS); } catch (Exception e) { if (newOffsetsTable != null) { - StorageUtil.deleteUnusedFiles(logger, newSSTable, newOffsetsTable); + StorageUtility.deleteUnusedFiles(logger, newSSTable, newOffsetsTable); } else if (newSSTable != null) { - StorageUtil.deleteUnusedFiles(logger, newSSTable); + StorageUtility.deleteUnusedFiles(logger, newSSTable); } throw e; } finally { - StorageUtil.deleteUnusedFiles(logger, tempDirectory); + StorageUtility.deleteUnusedFiles(logger, tempDirectory); } logger.info(() -> String.format("Compacted to dir %s, timestamp %s", basePath, timestamp)); } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java 
b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java similarity index 98% rename from src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java rename to src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java index 4487402ab..d15445852 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtil.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java @@ -19,11 +19,12 @@ import java.util.logging.Logger; import java.util.stream.Stream; -public class StorageUtil { +public class StorageUtility { public static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING }; + private static final OpenOption[] WRITE_OPTIONS = new OpenOption[] { StandardOpenOption.READ, StandardOpenOption.WRITE, @@ -31,6 +32,9 @@ public class StorageUtil { StandardOpenOption.CREATE }; + private StorageUtility() { + } + // Удаление ненужных файлов не является чем то критически важным // Если произойдет исключение, лучше словить и вывести в лог, чем останавливать работу public static void deleteUnusedFiles(Logger logger, Path... files) { From a7012c407d2e40f4114eaf135a486e877c1dcbdd Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 00:05:59 +0300 Subject: [PATCH 28/36] fix --- .../itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java | 2 +- .../vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java | 2 ++ .../ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java index eaa99d727..7457d5091 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/SimpleDaoLoggerUtility.java @@ -4,7 +4,7 @@ import java.util.logging.Logger; // Логгер, который я включаю локально, но выключаю перед пушем, чтобы он не засорял гитхаб. -public class SimpleDaoLoggerUtility { +public final class SimpleDaoLoggerUtility { private SimpleDaoLoggerUtility() { } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index cec0f4a7c..cd41cb996 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -42,6 +42,7 @@ public InMemoryStorageImpl(EntryExtractor extractor, long flushThresholdBy * В операциях на чтение точного значения daoSize (flush) следует использовать writeLock. * writeLock() гарантирует что в настоящее время нет readLock'ов, а значит и незаконченных операций изменения size. 
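The javadoc above (in Russian) says that operations needing the exact daoSize value (flush) should take the writeLock, because writeLock() guarantees there are currently no readLock holders and therefore no unfinished size updates. A minimal, self-contained sketch of that accounting idea with String keys and values; sizeOf() is an assumed stand-in for EntryExtractor.size(), and the class name is invented.

import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Writers update the map and the running byte count under the read lock; the
// flusher takes the write lock to observe a settled size before swapping memtables.
final class MemtableSizeSketch {
    private final ReadWriteLock guard = new ReentrantReadWriteLock();
    private final AtomicLong sizeInBytes = new AtomicLong();
    private final ConcurrentSkipListMap<String, String> memtable = new ConcurrentSkipListMap<>();

    private static long sizeOf(String key, String value) {
        return key.length() + (value == null ? 0 : value.length());
    }

    void upsert(String key, String value) {
        guard.readLock().lock();
        try {
            String previous = memtable.put(key, value);
            long delta = sizeOf(key, value) - (previous == null ? 0 : sizeOf(key, previous));
            sizeInBytes.addAndGet(delta);
        } finally {
            guard.readLock().unlock();
        }
    }

    // Exact size is only read under the write lock: no read-lock holders means no
    // half-applied (map updated, counter not yet bumped) upserts are in flight.
    long settledSize() {
        guard.writeLock().lock();
        try {
            return sizeInBytes.get();
        } finally {
            guard.writeLock().unlock();
        }
    }
}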
*/ + @SuppressWarnings("unused") private record DaoState>(ConcurrentNavigableMap dao, AtomicLong daoSize) { public static > DaoState createEmpty(EntryExtractor extractor) { return new DaoState<>(new ConcurrentSkipListMap<>(extractor), new AtomicLong(0)); @@ -52,6 +53,7 @@ public FlushingDaoState toFlushedState() { } } + @SuppressWarnings("unused") private record FlushingDaoState(ConcurrentNavigableMap dao, long daoSize, FlushingState flushingState) { public boolean isRunning() { return flushingState == FlushingState.RUNNING; diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java index d15445852..b42149595 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java @@ -19,7 +19,7 @@ import java.util.logging.Logger; import java.util.stream.Stream; -public class StorageUtility { +public final class StorageUtility { public static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING From c63ff7aaa9e2b2a182fea8b8fa153e6b70783f7e Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 00:08:15 +0300 Subject: [PATCH 29/36] fix again --- .../storage/InMemoryStorageImpl.java | 9 +++++++-- .../storage/SSTableStorageImpl.java | 11 ++++++++--- .../kovalchukvladislav/storage/StorageUtility.java | 5 ----- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index cd41cb996..586dd80dd 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -9,6 +9,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -21,6 +22,10 @@ import java.util.logging.Logger; public class InMemoryStorageImpl> implements InMemoryStorage { + private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING + }; private static final Logger logger = SimpleDaoLoggerUtility.createLogger(InMemoryStorageImpl.class); private final long flushThresholdBytes; private final EntryExtractor extractor; @@ -218,8 +223,8 @@ private String flushImpl(Iterator immutableCollectionIterator, TableInfo info StorageUtility.writeData(tmpSSTable, tmpOffsets, immutableCollectionIterator, info, extractor); - newSSTable = Files.move(tmpSSTable, basePath.resolve(dbPrefix + timestamp), StorageUtility.MOVE_OPTIONS); - Files.move(tmpOffsets, basePath.resolve(offsetsPrefix + timestamp), StorageUtility.MOVE_OPTIONS); + newSSTable = Files.move(tmpSSTable, basePath.resolve(dbPrefix + timestamp), MOVE_OPTIONS); + Files.move(tmpOffsets, basePath.resolve(offsetsPrefix + timestamp), MOVE_OPTIONS); } catch (Exception e) { // newOffsets чистить не надо. Это последняя операция, если исключение то он точно не перемещен. 
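A side note on the MOVE_OPTIONS being inlined here: Files.move with ATOMIC_MOVE throws AtomicMoveNotSupportedException when the move cannot be performed atomically, for example across file systems — which is possible in this layout because the temp directory comes from the default temp location rather than from basePath. The patch does not handle that case; the sketch below shows one possible fallback, purely as an illustration.

import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical defensive variant of the atomic publish step.
final class SafeMoveSketch {
    private SafeMoveSketch() {
    }

    static void moveAtomicallyOrCopy(Path source, Path target) throws IOException {
        try {
            Files.move(source, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
        } catch (AtomicMoveNotSupportedException e) {
            // Non-atomic fallback: acceptable only if readers tolerate seeing the file
            // late, e.g. because its name is published to the metadata file afterwards.
            Files.move(source, target, StandardCopyOption.REPLACE_EXISTING);
        }
    }
}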
if (newSSTable != null) { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 26fc725d0..2bebd62f6 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -15,6 +15,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Collections; @@ -28,6 +29,10 @@ import java.util.logging.Logger; public class SSTableStorageImpl> implements SSTableStorage { + private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { + StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING + }; private static final Logger logger = SimpleDaoLoggerUtility.createLogger(SSTableStorageImpl.class); private final Path basePath; private final String metadataFilename; @@ -202,10 +207,10 @@ private void compactAndAddToMetadata() throws IOException { Path tmpMetadata = addSSTableId(tempDirectory, timestamp); Path newMetadata = basePath.resolve(metadataFilename); - newSSTable = Files.move(tmpSSTable, basePath.resolve(dataPrefix + timestamp), StorageUtility.MOVE_OPTIONS); + newSSTable = Files.move(tmpSSTable, basePath.resolve(dataPrefix + timestamp), MOVE_OPTIONS); newOffsetsTable = Files.move(tmpOffsetsTable, basePath.resolve(offsetsPrefix + timestamp), - StorageUtility.MOVE_OPTIONS); - Files.move(tmpMetadata, newMetadata, StorageUtility.MOVE_OPTIONS); + MOVE_OPTIONS); + Files.move(tmpMetadata, newMetadata, MOVE_OPTIONS); } catch (Exception e) { if (newOffsetsTable != null) { StorageUtility.deleteUnusedFiles(logger, newSSTable, newOffsetsTable); diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java index b42149595..973faff0f 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java @@ -20,11 +20,6 @@ import java.util.stream.Stream; public final class StorageUtility { - public static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { - StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING - }; - private static final OpenOption[] WRITE_OPTIONS = new OpenOption[] { StandardOpenOption.READ, StandardOpenOption.WRITE, From b74b7787a4e38d3a7078fc7f87f0367c062b2a52 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 00:12:09 +0300 Subject: [PATCH 30/36] unused fix --- .../itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java | 2 ++ .../itmo/kovalchukvladislav/storage/SSTableStorageImpl.java | 4 ++++ .../ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java | 1 - 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index 586dd80dd..81805107c 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -48,6 +48,7 @@ public InMemoryStorageImpl(EntryExtractor extractor, long flushThresholdBy * writeLock() гарантирует что в настоящее время нет readLock'ов, 
а значит и незаконченных операций изменения size. */ @SuppressWarnings("unused") + // Компилятор ругается на unused переменные внутри record, хотя они очень даже used private record DaoState>(ConcurrentNavigableMap dao, AtomicLong daoSize) { public static > DaoState createEmpty(EntryExtractor extractor) { return new DaoState<>(new ConcurrentSkipListMap<>(extractor), new AtomicLong(0)); @@ -59,6 +60,7 @@ public FlushingDaoState toFlushedState() { } @SuppressWarnings("unused") + // Компилятор ругается на unused переменные внутри record, хотя они очень даже used private record FlushingDaoState(ConcurrentNavigableMap dao, long daoSize, FlushingState flushingState) { public boolean isRunning() { return flushingState == FlushingState.RUNNING; diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 2bebd62f6..6827d86f9 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -62,6 +62,8 @@ public SSTableStorageImpl(Path basePath, this.stateRef.set(state); } + @SuppressWarnings("unused") + // Компилятор ругается на unused переменные внутри record, хотя они очень даже used private record State(List ssTableIds, List data, List offsets) { public int getCount() { return ssTableIds.size(); @@ -70,6 +72,8 @@ public int getCount() { // Вызывается из фонового flush. Можем позволить наглый writeLock на весь метод @Override + @SuppressWarnings("unused") + // Компилятор ругается на unused ignoredPath, хотя в названии переменной есть unused public void addSSTableId(String id, boolean needRefresh) throws IOException { reloadSSTableLock.writeLock().lock(); try { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java index 973faff0f..f5b482307 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java @@ -12,7 +12,6 @@ import java.nio.file.Files; import java.nio.file.OpenOption; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; import java.util.Comparator; import java.util.Iterator; From c031fa61bd9b8ae438d6c2a6f11f08fec562f401 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 00:24:02 +0300 Subject: [PATCH 31/36] test fix --- .../itmo/kovalchukvladislav/storage/SSTableStorageImpl.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 6827d86f9..ef4fdb7d2 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -184,6 +184,10 @@ public void compact() throws IOException { reloadSSTableLock.writeLock().lock(); try { List ssTableIds = stateRef.get().ssTableIds; + if (ssTableIds.size() <= 1) { + logger.info("SSTables <= 1, not compacting: " + ssTableIds); + return; + } compactAndAddToMetadata(); reloadSSTableIds(readSSTableIds()); filesToDelete.addAll(convertSSTableIdsToPath(ssTableIds)); From 16d1bb36601f0667a0c309590ae265d40bec32ca Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 02:39:08 +0300 
Subject: [PATCH 32/36] final fix --- .../AbstractBasedOnSSTableDao.java | 18 ++- .../storage/SSTableStorageImpl.java | 150 ++++++++---------- 2 files changed, 84 insertions(+), 84 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index d32ff1de9..872d49869 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -34,13 +34,14 @@ public abstract class AbstractBasedOnSSTableDao> implement private final long flushThresholdBytes; private final EntryExtractor extractor; private final AtomicBoolean isClosed = new AtomicBoolean(false); + private final AtomicBoolean isFlushingOrCompacting = new AtomicBoolean(false); private final ExecutorService flushQueue = Executors.newSingleThreadExecutor(); private final ExecutorService compactQueue = Executors.newSingleThreadExecutor(); /** * В get(), upsert() и compact() для inMemoryStorage и ssTableStorage не требуется синхронизация между собой. - * Исключение составляет только flush(). - * Следует проследить что на любом этапе flush() оба стораджа в сумме будут иметь полные данные. + * Исключение составляет только flush() и compact(). + * Следует проследить что на любом этапе оба стораджа в сумме будут иметь полные данные. */ private final InMemoryStorage inMemoryStorage; private final SSTableStorage ssTableStorage; @@ -82,11 +83,16 @@ public void upsert(E entry) { @Override public void flush() { + if (!isFlushingOrCompacting.compareAndSet(false, true)) { + logger.info("Flush or compact already in process"); + return; + } Callable flushCallable = inMemoryStorage.prepareFlush( basePath, DB_FILENAME_PREFIX, OFFSETS_FILENAME_PREFIX); if (flushCallable == null) { + isFlushingOrCompacting.set(false); return; } submitFlushAndAddSSTable(flushCallable); @@ -100,6 +106,8 @@ private void submitFlushAndAddSSTable(Callable flushCallable) { inMemoryStorage.completeFlush(); } catch (Exception e) { inMemoryStorage.failFlush(); + } finally { + isFlushingOrCompacting.set(false); } }); } @@ -124,11 +132,17 @@ public void close() { @Override public void compact() { + if (!isFlushingOrCompacting.compareAndSet(false, true)) { + logger.info("Flush or compact already in process"); + return; + } compactQueue.execute(() -> { try { ssTableStorage.compact(); } catch (IOException e) { throw new UncheckedIOException(e); + } finally { + isFlushingOrCompacting.set(false); } }); } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index ef4fdb7d2..9b5a1555f 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -23,13 +23,10 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.logging.Logger; public class SSTableStorageImpl> implements SSTableStorage { - private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { + private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[]{ StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING }; @@ -41,9 
+38,8 @@ public class SSTableStorageImpl> implements SSTableStorage private final Arena arena = Arena.ofShared(); private final EntryExtractor extractor; private final Set filesToDelete = ConcurrentHashMap.newKeySet(); - - private final ReadWriteLock reloadSSTableLock = new ReentrantReadWriteLock(); - private final AtomicReference stateRef = new AtomicReference<>(); + // Read-only состояние, не меняется, можем позволить себе не использовать локи + private volatile State state; public SSTableStorageImpl(Path basePath, String metadataFilename, @@ -58,8 +54,7 @@ public SSTableStorageImpl(Path basePath, if (!Files.exists(basePath)) { Files.createDirectory(basePath); } - State state = reloadSSTableIds(readSSTableIds()); - this.stateRef.set(state); + this.state = reloadSSTableIds(); } @SuppressWarnings("unused") @@ -70,29 +65,75 @@ public int getCount() { } } - // Вызывается из фонового flush. Можем позволить наглый writeLock на весь метод + @Override + public E get(D key) { + State currentState = state; + for (int i = currentState.getCount() - 1; i >= 0; i--) { + MemorySegment storage = currentState.data.get(i); + MemorySegment offsets = currentState.offsets.get(i); + + long offset = extractor.findLowerBoundValueOffset(key, storage, offsets); + if (offset == -1) { + continue; + } + D lowerBoundKey = extractor.readValue(storage, offset); + + if (extractor.compare(lowerBoundKey, key) == 0) { + long valueOffset = offset + extractor.size(lowerBoundKey); + D value = extractor.readValue(storage, valueOffset); + return extractor.createEntry(lowerBoundKey, value); + } + } + return null; + } + + @Override + public List> getIterators(D from, D to) { + State currentState = state; + List> iterators = new ArrayList<>(currentState.getCount()); + for (int i = currentState.getCount() - 1; i >= 0; i--) { + MemorySegment storage = currentState.data.get(i); + MemorySegment offsets = currentState.offsets.get(i); + iterators.add(new StorageIterator<>(from, to, storage, offsets, extractor)); + } + return iterators; + } + + // State может поменяться только в addSSTableId (используется при flush() для обновления состояния) и compact(). + // Оба метода никогда не вызываются одновременно из AbstractBasedOnSSTableDao, синхронизация не нужна. 
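The two comment lines above describe the copy-on-write idea behind the volatile State field this patch introduces: readers take a single snapshot of the reference and only ever see an immutable record, while the sole writer builds a fresh State and publishes it with one volatile write. A minimal self-contained sketch of that pattern, under the same assumption that at most one thread reloads at a time; the names VolatileStateSketch, count and reload are illustrative and do not come from the patch:

    import java.util.List;

    final class VolatileStateSketch {
        // Immutable snapshot; a brand-new instance is published on every reload.
        private record State(List<String> ssTableIds) { }

        private volatile State state = new State(List.of());

        // Reader: one volatile read of the reference yields a consistent snapshot,
        // so no lock is needed even while a reload is happening concurrently.
        int count() {
            State current = state;
            return current.ssTableIds().size();
        }

        // Writer: safe without locks only because the caller guarantees that flush
        // and compact never run at the same time, so there is a single publisher.
        void reload(List<String> newIds) {
            state = new State(List.copyOf(newIds));
        }
    }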
@Override @SuppressWarnings("unused") // Компилятор ругается на unused ignoredPath, хотя в названии переменной есть unused public void addSSTableId(String id, boolean needRefresh) throws IOException { - reloadSSTableLock.writeLock().lock(); - try { - Path ignoredPath = addSSTableId(basePath, id); - if (needRefresh) { - State newState = reloadSSTableIds(readSSTableIds()); - stateRef.set(newState); - } - } finally { - reloadSSTableLock.writeLock().unlock(); + Path ignoredPath = addSSTableId(basePath, id); + if (needRefresh) { + state = reloadSSTableIds(); } } - private Path addSSTableId(Path path, String id) throws IOException { - return Files.writeString(path.resolve(metadataFilename), id + System.lineSeparator(), - StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE); + @Override + public void compact() throws IOException { + List ssTableIds = state.ssTableIds; + if (ssTableIds.size() <= 1) { + logger.info("SSTables <= 1, not compacting: " + ssTableIds); + return; + } + + compactAndChangeMetadata(); + state = reloadSSTableIds(); + filesToDelete.addAll(convertSSTableIdsToPath(ssTableIds)); + } + + @Override + public void close() { + if (arena.scope().isAlive()) { + arena.close(); + StorageUtility.deleteUnusedFiles(logger, filesToDelete.toArray(Path[]::new)); + } } - private State reloadSSTableIds(List ssTableIds) throws IOException { + private State reloadSSTableIds() throws IOException { + List ssTableIds = readSSTableIds(); logger.info(() -> String.format("Reloading files from %s", basePath)); List newDbMappedSegments = new ArrayList<>(ssTableIds.size()); List newOffsetMappedSegments = new ArrayList<>(ssTableIds.size()); @@ -136,67 +177,12 @@ private void readFileAndMapToSegment(List dbMappedResult, logger.info(() -> String.format("Successfully read files with %s timestamp", timestamp)); } - @Override - public E get(D key) { - State state = stateRef.get(); - for (int i = state.getCount() - 1; i >= 0; i--) { - MemorySegment storage = state.data.get(i); - MemorySegment offsets = state.offsets.get(i); - - long offset = extractor.findLowerBoundValueOffset(key, storage, offsets); - if (offset == -1) { - continue; - } - D lowerBoundKey = extractor.readValue(storage, offset); - - if (extractor.compare(lowerBoundKey, key) == 0) { - long valueOffset = offset + extractor.size(lowerBoundKey); - D value = extractor.readValue(storage, valueOffset); - return extractor.createEntry(lowerBoundKey, value); - } - } - return null; - } - - @Override - public List> getIterators(D from, D to) { - State state = stateRef.get(); - List> iterators = new ArrayList<>(state.getCount()); - for (int i = state.getCount() - 1; i >= 0; i--) { - MemorySegment storage = state.data.get(i); - MemorySegment offsets = state.offsets.get(i); - iterators.add(new StorageIterator<>(from, to, storage, offsets, extractor)); - } - return iterators; - } - - @Override - public void close() { - if (arena.scope().isAlive()) { - arena.close(); - StorageUtility.deleteUnusedFiles(logger, filesToDelete.toArray(Path[]::new)); - } - } - - @Override - public void compact() throws IOException { - // Опять нагло используем лок на весь метод - reloadSSTableLock.writeLock().lock(); - try { - List ssTableIds = stateRef.get().ssTableIds; - if (ssTableIds.size() <= 1) { - logger.info("SSTables <= 1, not compacting: " + ssTableIds); - return; - } - compactAndAddToMetadata(); - reloadSSTableIds(readSSTableIds()); - filesToDelete.addAll(convertSSTableIdsToPath(ssTableIds)); - } finally { - reloadSSTableLock.writeLock().unlock(); - 
} + private Path addSSTableId(Path path, String id) throws IOException { + return Files.writeString(path.resolve(metadataFilename), id + System.lineSeparator(), + StandardOpenOption.WRITE, StandardOpenOption.APPEND, StandardOpenOption.CREATE); } - private void compactAndAddToMetadata() throws IOException { + private void compactAndChangeMetadata() throws IOException { Path tempDirectory = Files.createTempDirectory(null); String timestamp = String.valueOf(System.currentTimeMillis()); From 576fba69d3a69536ace3190ba2c375db0a5912bd Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Tue, 26 Dec 2023 03:14:09 +0300 Subject: [PATCH 33/36] final fix --- .../AbstractBasedOnSSTableDao.java | 29 +++++++++---------- .../storage/SSTableStorageImpl.java | 1 - 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java index 872d49869..ff8a8cf1e 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/AbstractBasedOnSSTableDao.java @@ -35,8 +35,7 @@ public abstract class AbstractBasedOnSSTableDao> implement private final EntryExtractor extractor; private final AtomicBoolean isClosed = new AtomicBoolean(false); private final AtomicBoolean isFlushingOrCompacting = new AtomicBoolean(false); - private final ExecutorService flushQueue = Executors.newSingleThreadExecutor(); - private final ExecutorService compactQueue = Executors.newSingleThreadExecutor(); + private final ExecutorService flushOrCompactQueue = Executors.newSingleThreadExecutor(); /** * В get(), upsert() и compact() для inMemoryStorage и ssTableStorage не требуется синхронизация между собой. 
@@ -99,7 +98,7 @@ public void flush() { } private void submitFlushAndAddSSTable(Callable flushCallable) { - flushQueue.execute(() -> { + flushOrCompactQueue.execute(() -> { try { String newTimestamp = flushCallable.call(); ssTableStorage.addSSTableId(newTimestamp, true); @@ -114,20 +113,20 @@ private void submitFlushAndAddSSTable(Callable flushCallable) { @Override public void close() { - if (isClosed.compareAndSet(false, true)) { - flushQueue.close(); - compactQueue.close(); + if (!isClosed.compareAndSet(false, true)) { + return; + } - try { - String newTimestamp = inMemoryStorage.close(basePath, DB_FILENAME_PREFIX, OFFSETS_FILENAME_PREFIX); - if (newTimestamp != null) { - ssTableStorage.addSSTableId(newTimestamp, false); - } - } catch (Exception e) { - logger.severe(() -> "Error while flushing on close: " + e.getMessage()); + flushOrCompactQueue.close(); + try { + String newTimestamp = inMemoryStorage.close(basePath, DB_FILENAME_PREFIX, OFFSETS_FILENAME_PREFIX); + if (newTimestamp != null) { + ssTableStorage.addSSTableId(newTimestamp, false); } - ssTableStorage.close(); + } catch (Exception e) { + logger.severe(() -> "Error while flushing on close: " + e.getMessage()); } + ssTableStorage.close(); } @Override @@ -136,7 +135,7 @@ public void compact() { logger.info("Flush or compact already in process"); return; } - compactQueue.execute(() -> { + flushOrCompactQueue.execute(() -> { try { ssTableStorage.compact(); } catch (IOException e) { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 9b5a1555f..806c650ee 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -38,7 +38,6 @@ public class SSTableStorageImpl> implements SSTableStorage private final Arena arena = Arena.ofShared(); private final EntryExtractor extractor; private final Set filesToDelete = ConcurrentHashMap.newKeySet(); - // Read-only состояние, не меняется, можем позволить себе не использовать локи private volatile State state; public SSTableStorageImpl(Path basePath, From 4352e3aae4fa3da2207e37e50a9054a1863a0c58 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Dec 2023 07:05:00 +0300 Subject: [PATCH 34/36] review fixes --- .../model/StorageIterator.java | 4 + .../storage/InMemoryStorageImpl.java | 119 ++++++++++-------- .../storage/SSTableStorageImpl.java | 1 + .../storage/StorageUtility.java | 3 +- .../MemorySegmentDaoFactory.java | 2 +- 5 files changed, 72 insertions(+), 57 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java index 7b2c7c27f..4454f1204 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/model/StorageIterator.java @@ -4,6 +4,7 @@ import java.lang.foreign.MemorySegment; import java.lang.foreign.ValueLayout; import java.util.Iterator; +import java.util.NoSuchElementException; public class StorageIterator> implements Iterator { private final EntryExtractor extractor; @@ -80,6 +81,9 @@ public boolean hasNext() { @Override public E next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } E entry = extractor.readEntry(storageSegment, start); start += extractor.size(entry); return entry; diff --git 
a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index 81805107c..46c1f7c4e 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -22,7 +22,7 @@ import java.util.logging.Logger; public class InMemoryStorageImpl> implements InMemoryStorage { - private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[] { + private static final StandardCopyOption[] MOVE_OPTIONS = new StandardCopyOption[]{ StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING }; @@ -31,14 +31,23 @@ public class InMemoryStorageImpl> implements InMemoryStora private final EntryExtractor extractor; private volatile DaoState daoState; - // Nullable private volatile FlushingDaoState flushingDaoState; private final ReadWriteLock stateLock = new ReentrantReadWriteLock(); public InMemoryStorageImpl(EntryExtractor extractor, long flushThresholdBytes) { this.extractor = extractor; this.flushThresholdBytes = flushThresholdBytes; - this.daoState = DaoState.createEmpty(extractor); + this.daoState = createEmptyDaoState(); + this.flushingDaoState = createEmptyFlushingDaoState(); + + } + + private DaoState createEmptyDaoState() { + return new DaoState<>(new ConcurrentSkipListMap<>(extractor), new AtomicLong(0)); + } + + private FlushingDaoState createEmptyFlushingDaoState() { + return new FlushingDaoState<>(new ConcurrentSkipListMap<>(extractor), 0, FlushingState.NOT_RUNNING); } /** @@ -48,37 +57,32 @@ public InMemoryStorageImpl(EntryExtractor extractor, long flushThresholdBy * writeLock() гарантирует что в настоящее время нет readLock'ов, а значит и незаконченных операций изменения size. */ @SuppressWarnings("unused") - // Компилятор ругается на unused переменные внутри record, хотя они очень даже used private record DaoState>(ConcurrentNavigableMap dao, AtomicLong daoSize) { - public static > DaoState createEmpty(EntryExtractor extractor) { - return new DaoState<>(new ConcurrentSkipListMap<>(extractor), new AtomicLong(0)); - } - - public FlushingDaoState toFlushedState() { + public FlushingDaoState toFlushingRunningDaoState() { return new FlushingDaoState<>(dao, daoSize.get(), FlushingState.RUNNING); } } @SuppressWarnings("unused") - // Компилятор ругается на unused переменные внутри record, хотя они очень даже used private record FlushingDaoState(ConcurrentNavigableMap dao, long daoSize, FlushingState flushingState) { - public boolean isRunning() { - return flushingState == FlushingState.RUNNING; - } - - public boolean isFailed() { - return flushingState == FlushingState.FAILED; - } public FlushingDaoState toFailed() { return new FlushingDaoState<>(dao, daoSize, FlushingState.FAILED); } + + public FlushingDaoState failedToTryAgain() { + if (flushingState != FlushingState.FAILED) { + throw new IllegalStateException("This method should be called when state is failed"); + } + return new FlushingDaoState<>(dao, daoSize, FlushingState.RUNNING); + } } private enum FlushingState { + NOT_RUNNING, RUNNING, FAILED, - // Можно добавить третье состояние: данные выгружены, но произошло исключение при их релоаде в SSTableStorage. + // Можно добавить четвертое состояние: данные выгружены, но произошло исключение при их релоаде в SSTableStorage. // Позволит при повторном flush вернуть уже готовый timestamp, а не флашить опять в новый файл. 
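    // Summary of how these states are driven by prepareFlush/completeFlush/failFlush
    // further down in this class (a reading aid, not part of the original patch):
    //   NOT_RUNNING -> RUNNING     when prepareFlush() hands out a flush task
    //   FAILED      -> RUNNING     when a later flush() retries via failedToTryAgain()
    //   RUNNING     -> NOT_RUNNING on completeFlush()
    //   RUNNING     -> FAILED      on failFlush()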
} @@ -90,13 +94,13 @@ public E get(D key) { stateLock.readLock().lock(); try { dao = daoState.dao; - flushingDao = flushingDaoState == null ? null : flushingDaoState.dao; + flushingDao = flushingDaoState.dao; } finally { stateLock.readLock().unlock(); } E entry = dao.get(key); - if (entry == null && flushingDao != null) { + if (entry == null && !flushingDao.isEmpty()) { entry = flushingDao.get(key); } return entry; @@ -105,23 +109,14 @@ public E get(D key) { /** * Возвращает не точное значение size в угоду перфомансу, иначе будет куча writeLock. * При параллельных upsert(), в одном из них daoSize может не успеть инкрементироваться для вставляемого entry. - * Тем не менее, "приблизительное size" можно использовать для детекта случаев когда надо делать flush(). + * Тем не менее "приблизительное size" можно использовать для детекта случаев когда надо делать flush(). */ @Override public long upsertAndGetSize(E entry) { stateLock.readLock().lock(); try { // "приблизительный" size, не пользуемся write lock'ами в угоду перфомансу - AtomicLong daoSize = daoState.daoSize; - if (daoSize.get() >= flushThresholdBytes && flushingDaoState != null) { - if (flushingDaoState.isRunning()) { - throw new MemoryOverflowException("There no free space." - + "daoSize is max, previous flush running and not completed"); - } else if (flushingDaoState.isFailed()) { - throw new MemoryOverflowException("There no free space." - + "daoSize is max, previous flush was failed. Try to repeat flush"); - } - } + AtomicLong daoSize = getDaoSizeOrThrowMemoryOverflow(flushThresholdBytes, daoState, flushingDaoState); E oldEntry = daoState.dao.put(entry.key(), entry); long delta = extractor.size(entry) - extractor.size(oldEntry); @@ -131,6 +126,24 @@ public long upsertAndGetSize(E entry) { } } + private static > AtomicLong getDaoSizeOrThrowMemoryOverflow(long flushThresholdBytes, + DaoState daoState, + FlushingDaoState flushingDaoState) { + AtomicLong daoSize = daoState.daoSize; + if (daoSize.get() < flushThresholdBytes) { + return daoSize; + } + FlushingState flushingState = flushingDaoState.flushingState(); + if (flushingState == FlushingState.RUNNING) { + throw new MemoryOverflowException("There no free space." + + "daoSize is max, previous flush running and not completed"); + } else if (flushingState == FlushingState.FAILED) { + throw new MemoryOverflowException("There no free space." + + "daoSize is max, previous flush was failed. Try to repeat flush"); + } + return daoSize; + } + @Override public List> getIterators(D from, D to) { ConcurrentNavigableMap dao; @@ -139,18 +152,14 @@ public List> getIterators(D from, D to) { stateLock.readLock().lock(); try { dao = daoState.dao; - flushingDao = flushingDaoState == null ? 
null : flushingDaoState.dao; + flushingDao = flushingDaoState.dao; } finally { stateLock.readLock().unlock(); } List> result = new ArrayList<>(2); - if (dao != null) { - result.add(getIteratorDao(dao, from, to)); - } - if (flushingDao != null) { - result.add(getIteratorDao(flushingDao, from, to)); - } + result.add(getIteratorDao(dao, from, to)); + result.add(getIteratorDao(flushingDao, from, to)); return result; } @@ -180,22 +189,22 @@ public Callable prepareFlush(Path basePath, String dbFilenamePrefix, Str stateLock.writeLock().lock(); try { - if (flushingDaoState != null && flushingDaoState.isRunning()) { - return null; - } else if (flushingDaoState != null && flushingDaoState.isFailed()) { - newFlushingDaoState = new FlushingDaoState<>( - flushingDaoState.dao, - flushingDaoState.daoSize, - FlushingState.RUNNING); - flushingDaoState = newFlushingDaoState; - } else if (daoState.dao.isEmpty()) { - return null; - } else { - DaoState newDaoState = DaoState.createEmpty(extractor); - newFlushingDaoState = daoState.toFlushedState(); - - flushingDaoState = newFlushingDaoState; - daoState = newDaoState; + switch (flushingDaoState.flushingState) { + case RUNNING -> { + return null; + } + case FAILED -> { + newFlushingDaoState = flushingDaoState.failedToTryAgain(); + flushingDaoState = newFlushingDaoState; + } + case NOT_RUNNING -> { + DaoState newDaoState = createEmptyDaoState(); + newFlushingDaoState = daoState.toFlushingRunningDaoState(); + + flushingDaoState = newFlushingDaoState; + daoState = newDaoState; + } + default -> throw new IllegalStateException("Unexpected state: " + flushingDaoState.flushingState); } } finally { stateLock.writeLock().unlock(); @@ -255,7 +264,7 @@ public void failFlush() { public void completeFlush() { stateLock.writeLock().lock(); try { - flushingDaoState = null; + flushingDaoState = createEmptyFlushingDaoState(); } finally { stateLock.writeLock().unlock(); } diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 806c650ee..87c706c96 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -70,6 +70,7 @@ public E get(D key) { for (int i = currentState.getCount() - 1; i >= 0; i--) { MemorySegment storage = currentState.data.get(i); MemorySegment offsets = currentState.offsets.get(i); + offsets.force(); long offset = extractor.findLowerBoundValueOffset(key, storage, offsets); if (offset == -1) { diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java index f5b482307..62bb7ba74 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/StorageUtility.java @@ -25,6 +25,7 @@ public final class StorageUtility { StandardOpenOption.TRUNCATE_EXISTING, StandardOpenOption.CREATE }; + private static final int OFFSET_SIZE = Long.BYTES; private StorageUtility() { } @@ -63,7 +64,7 @@ public static > void writeData(Path dbPath, Path offsetsPa FileChannel offsets = FileChannel.open(offsetsPath, WRITE_OPTIONS); Arena arena = Arena.ofConfined()) { - long offsetsSize = info.recordsCount() * Long.BYTES; + long offsetsSize = info.recordsCount() * OFFSET_SIZE; MemorySegment fileSegment = db.map(FileChannel.MapMode.READ_WRITE, 0, info.recordsSize(), arena); MemorySegment 
offsetsSegment = offsets.map(FileChannel.MapMode.READ_WRITE, 0, offsetsSize, arena); diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index 7867c9fd4..fe4bd84e0 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,7 +12,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory(stage = 5) +@DaoFactory(stage = 5, week = 999) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; From 6025bb96565c482e5e33cec75d1c5d2658594dc3 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Dec 2023 07:09:22 +0300 Subject: [PATCH 35/36] remove accidental changes --- .../vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java | 1 - .../itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java index 87c706c96..806c650ee 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/SSTableStorageImpl.java @@ -70,7 +70,6 @@ public E get(D key) { for (int i = currentState.getCount() - 1; i >= 0; i--) { MemorySegment storage = currentState.data.get(i); MemorySegment offsets = currentState.offsets.get(i); - offsets.force(); long offset = extractor.findLowerBoundValueOffset(key, storage, offsets); if (offset == -1) { diff --git a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java index fe4bd84e0..7867c9fd4 100644 --- a/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java +++ b/src/main/java/ru/vk/itmo/test/kovalchukvladislav/MemorySegmentDaoFactory.java @@ -12,7 +12,7 @@ import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; -@DaoFactory(stage = 5, week = 999) +@DaoFactory(stage = 5) public class MemorySegmentDaoFactory implements DaoFactory.Factory> { private static final Charset CHARSET = StandardCharsets.UTF_8; private static final ValueLayout.OfByte VALUE_LAYOUT = ValueLayout.JAVA_BYTE; From eca6c7ac721934422cecedde78c4e44d3c53d7f9 Mon Sep 17 00:00:00 2001 From: Vladislav Kovalchuk Date: Thu, 28 Dec 2023 07:13:17 +0300 Subject: [PATCH 36/36] codeclimate fix --- .../kovalchukvladislav/storage/InMemoryStorageImpl.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java index 46c1f7c4e..54b3eb541 100644 --- a/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java +++ b/src/main/java/ru/vk/itmo/kovalchukvladislav/storage/InMemoryStorageImpl.java @@ -82,7 +82,7 @@ private enum FlushingState { NOT_RUNNING, RUNNING, FAILED, - // Можно добавить четвертое состояние: данные выгружены, но произошло исключение при их релоаде в SSTableStorage. 
+ // Можно добавить еще одно состояние: данные выгружены, но произошло исключение при их релоаде в SSTableStorage. // Позволит при повторном flush вернуть уже готовый timestamp, а не флашить опять в новый файл. } @@ -126,9 +126,8 @@ public long upsertAndGetSize(E entry) { } } - private static > AtomicLong getDaoSizeOrThrowMemoryOverflow(long flushThresholdBytes, - DaoState daoState, - FlushingDaoState flushingDaoState) { + private static > AtomicLong getDaoSizeOrThrowMemoryOverflow( + long flushThresholdBytes, DaoState daoState, FlushingDaoState flushingDaoState) { AtomicLong daoSize = daoState.daoSize; if (daoSize.get() < flushThresholdBytes) { return daoSize;