diff --git a/patches/server/0004-Threaded-Regions.patch b/patches/server/0004-Threaded-Regions.patch
index 2d4823c..f7cd403 100644
--- a/patches/server/0004-Threaded-Regions.patch
+++ b/patches/server/0004-Threaded-Regions.patch
@@ -37,457 +37,155 @@ index f4415f782b32fed25da98e44b172f717c4d46e34..ba7c24b3627a1827721d2462add15fdd
/**
* Atomically removes the head from this queue if it exists, otherwise prevents additions to this queue if no
* head is removed.
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
+diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java
new file mode 100644
-index 0000000000000000000000000000000000000000..9df9881396f4a69b51acaae562b12b8ce0a48443
+index 0000000000000000000000000000000000000000..6a155b779914828a0d4199bdfcb0d6fca25e1581
--- /dev/null
-+++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
-@@ -0,0 +1,139 @@
++++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java
+@@ -0,0 +1,146 @@
+package ca.spottedleaf.concurrentutil.lock;
+
-+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
-+import java.lang.invoke.VarHandle;
-+import java.util.concurrent.TimeUnit;
-+import java.util.concurrent.locks.AbstractQueuedSynchronizer;
-+import java.util.concurrent.locks.Condition;
-+import java.util.concurrent.locks.Lock;
-+
-+/**
-+ * Implementation of {@link Lock} that should outperform {@link java.util.concurrent.locks.ReentrantLock}.
-+ * The lock is considered a non-fair lock, as specified by {@link java.util.concurrent.locks.ReentrantLock},
-+ * and additionally does not support the creation of Conditions.
-+ *
-+ *
-+ * Specifically, this implementation is careful to avoid synchronisation penalties when multi-acquiring and
-+ * multi-releasing locks from the same thread, and additionally avoids unnecessary synchronisation penalties
-+ * when releasing the lock.
-+ *
-+ */
-+public class ImproveReentrantLock implements Lock {
-+
-+ private final InternalLock lock = new InternalLock();
-+
-+ private static final class InternalLock extends AbstractQueuedSynchronizer {
-+
-+ private volatile Thread owner;
-+ private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(InternalLock.class, "owner", Thread.class);
-+ private int count;
-+
-+ private Thread getOwnerPlain() {
-+ return (Thread)OWNER_HANDLE.get(this);
-+ }
-+
-+ private Thread getOwnerVolatile() {
-+ return (Thread)OWNER_HANDLE.getVolatile(this);
-+ }
-+
-+ private void setOwnerRelease(final Thread to) {
-+ OWNER_HANDLE.setRelease(this, to);
-+ }
-+
-+ private void setOwnerVolatile(final Thread to) {
-+ OWNER_HANDLE.setVolatile(this, to);
-+ }
-+
-+ private Thread compareAndExchangeOwnerVolatile(final Thread expect, final Thread update) {
-+ return (Thread)OWNER_HANDLE.compareAndExchange(this, expect, update);
-+ }
-+
-+ @Override
-+ protected final boolean tryAcquire(int acquires) {
-+ final Thread current = Thread.currentThread();
-+ final Thread owner = this.getOwnerVolatile();
-+
-+ // When trying to blind acquire the lock, using just compare and exchange is faster
-+ // than reading the owner field first - but comes at the cost of performing the compare and exchange
-+ // even if the current thread owns the lock
-+ if ((owner == null && null == this.compareAndExchangeOwnerVolatile(null, current)) || owner == current) {
-+ this.count += acquires;
-+ return true;
-+ }
-+
-+ return false;
-+ }
-+
-+ @Override
-+ protected final boolean tryRelease(int releases) {
-+ if (this.getOwnerPlain() == Thread.currentThread()) {
-+ final int newCount = this.count -= releases;
-+ if (newCount == 0) {
-+ // When the caller, which is release(), attempts to signal the next node, it will use volatile
-+ // to retrieve the node and status.
-+ // Let's say that we have written this field null as release, and then checked for a next node
-+ // using volatile and then determined there are no waiters.
-+ // While a call to tryAcquire() can fail for another thread since the write may not
-+ // publish yet, once the thread adds itself to the waiters list it will synchronise with
-+ // the write to the field, since the volatile write to put the thread on the waiter list
-+ // will synchronise with the volatile read we did earlier to check for any
-+ // waiters.
-+ this.setOwnerRelease(null);
-+ return true;
-+ }
-+ return false;
-+ }
-+ throw new IllegalMonitorStateException();
-+ }
-+ }
-+
-+ /**
-+ * Returns the thread that owns the lock, or returns {@code null} if there is no such thread.
-+ */
-+ public Thread getLockOwner() {
-+ return this.lock.getOwnerVolatile();
-+ }
-+
-+ /**
-+ * Returns whether the current thread owns the lock.
-+ */
-+ public boolean isHeldByCurrentThread() {
-+ return this.lock.getOwnerPlain() == Thread.currentThread();
-+ }
-+
-+ @Override
-+ public void lock() {
-+ this.lock.acquire(1);
-+ }
-+
-+ @Override
-+ public void lockInterruptibly() throws InterruptedException {
-+ if (Thread.interrupted()) {
-+ throw new InterruptedException();
-+ }
-+ this.lock.acquireInterruptibly(1);
-+ }
-+
-+ @Override
-+ public boolean tryLock() {
-+ return this.lock.tryAcquire(1);
-+ }
-+
-+ @Override
-+ public boolean tryLock(final long time, final TimeUnit unit) throws InterruptedException {
-+ if (Thread.interrupted()) {
-+ throw new InterruptedException();
-+ }
-+ return this.lock.tryAcquire(1) || this.lock.tryAcquireNanos(1, unit.toNanos(time));
-+ }
-+
-+ @Override
-+ public void unlock() {
-+ this.lock.release(1);
-+ }
-+
-+ @Override
-+ public Condition newCondition() {
-+ throw new UnsupportedOperationException();
-+ }
-+}
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
-new file mode 100644
-index 0000000000000000000000000000000000000000..793a7326141b7d83395585b3d32b0a7e8a6238a7
---- /dev/null
-+++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
-@@ -0,0 +1,303 @@
-+package ca.spottedleaf.concurrentutil.lock;
-+
-+import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
-+import java.lang.invoke.VarHandle;
-+import java.util.concurrent.TimeUnit;
-+import java.util.concurrent.locks.Condition;
-+import java.util.concurrent.locks.Lock;
++import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
++import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
++import java.util.ArrayList;
++import java.util.List;
+import java.util.concurrent.locks.LockSupport;
+
-+// ReentrantBiasedLock
-+public final class RBLock implements Lock {
++public final class AreaLock {
+
-+ private volatile LockWaiter owner;
-+ private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "owner", LockWaiter.class);
++ private final int coordinateShift;
+
-+ private volatile LockWaiter tail;
-+ private static final VarHandle TAIL_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "tail", LockWaiter.class);
++ private final Long2ReferenceOpenHashMap<Node> nodesByPosition = new Long2ReferenceOpenHashMap<>(1024, 0.10f);
+
-+ public RBLock() {
-+ // we can have the initial state as if it was locked by this thread, then unlocked
-+ final LockWaiter dummy = new LockWaiter(null, LockWaiter.STATE_BIASED, null);
-+ this.setOwnerPlain(dummy);
-+ // release ensures correct publishing
-+ this.setTailRelease(dummy);
++ public AreaLock(final int coordinateShift) {
++ this.coordinateShift = coordinateShift;
+ }
+
-+ private LockWaiter getOwnerVolatile() {
-+ return (LockWaiter)OWNER_HANDLE.getVolatile(this);
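++ // pack the section coordinates into one long key: z in the high 32 bits, x in the low 32 bits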
++ private static long key(final int x, final int z) {
++ return ((long)z << 32) | (x & 0xFFFFFFFFL);
+ }
+
-+ private void setOwnerPlain(final LockWaiter value) {
-+ OWNER_HANDLE.set(this, value);
-+ }
++ public Node lock(final int x, final int z, final int radius) {
++ final Thread thread = Thread.currentThread();
++ final int minX = (x - radius) >> this.coordinateShift;
++ final int minZ = (z - radius) >> this.coordinateShift;
++ final int maxX = (x + radius) >> this.coordinateShift;
++ final int maxZ = (z + radius) >> this.coordinateShift;
+
-+ private void setOwnerRelease(final LockWaiter value) {
-+ OWNER_HANDLE.setRelease(this, value);
-+ }
++ final Node node = new Node(x, z, radius, thread);
+
-+
-+
-+ private void setTailOpaque(final LockWaiter newTail) {
-+ TAIL_HANDLE.setOpaque(this, newTail);
-+ }
-+
-+ private void setTailRelease(final LockWaiter newTail) {
-+ TAIL_HANDLE.setRelease(this, newTail);
-+ }
-+
-+ private LockWaiter getTailOpaque() {
-+ return (LockWaiter)TAIL_HANDLE.getOpaque(this);
-+ }
-+
-+
-+ private void appendWaiter(final LockWaiter waiter) {
-+ // Similar to MultiThreadedQueue#appendList
-+ int failures = 0;
-+
-+ for (LockWaiter currTail = this.getTailOpaque(), curr = currTail;;) {
-+ /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
-+ /* It is likely due to a cache miss caused by another write to the next field */
-+ final LockWaiter next = curr.getNextVolatile();
-+
-+ for (int i = 0; i < failures; ++i) {
-+ Thread.onSpinWait();
-+ }
-+
-+ if (next == null) {
-+ final LockWaiter compared = curr.compareAndExchangeNextVolatile(null, waiter);
-+
-+ if (compared == null) {
-+ /* Added */
-+ /* Avoid CASing on tail more than we need to */
-+ /* CAS to avoid setting an out-of-date tail */
-+ if (this.getTailOpaque() == currTail) {
-+ this.setTailOpaque(waiter);
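++ // claim every section in the area; each distinct node we displace becomes a parent
++ // that must fully unlock before this node may proceed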
++ synchronized (this) {
++ ReferenceOpenHashSet<Node> parents = null;
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ final Node dependency = this.nodesByPosition.put(key(currX, currZ), node);
++ if (dependency == null) {
++ continue;
+ }
-+ return;
-+ }
+
-+ ++failures;
-+ curr = compared;
-+ continue;
-+ }
-+
-+ if (curr == currTail) {
-+ /* Tail is likely not up-to-date */
-+ curr = next;
-+ } else {
-+ /* Try to update to tail */
-+ if (currTail == (currTail = this.getTailOpaque())) {
-+ curr = next;
-+ } else {
-+ curr = currTail;
-+ }
-+ }
-+ }
-+ }
-+
-+ // required that expected is already appended to the wait chain
-+ private boolean tryAcquireBiased(final LockWaiter expected) {
-+ final LockWaiter owner = this.getOwnerVolatile();
-+ if (owner.getNextVolatile() == expected && owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-+ this.setOwnerRelease(expected);
-+ return true;
-+ }
-+ return false;
-+ }
-+
-+ @Override
-+ public void lock() {
-+ final Thread currThread = Thread.currentThread();
-+ final LockWaiter owner = this.getOwnerVolatile();
-+
-+ // try to fast acquire
-+
-+ final LockWaiter acquireObj;
-+ boolean needAppend = true;
-+
-+ if (owner.getNextVolatile() != null) {
-+ // unlikely we are able to fast acquire
-+ acquireObj = new LockWaiter(currThread, 1, null);
-+ } else {
-+ // may be able to fast acquire the lock
-+ if (owner.owner == currThread) {
-+ final int oldState = owner.incrementState();
-+ if (oldState == LockWaiter.STATE_BIASED) {
-+ // in this case, we may not have the lock.
-+ final LockWaiter next = owner.getNextVolatile();
-+ if (next == null) {
-+ // we win the lock
-+ return;
-+ } else {
-+ // we have incremented the state, which means any tryAcquireBiased() will fail.
-+ // The next waiter may be waiting for us, so we need to re-set our state and then
-+ // try to push the lock to them.
-+ // We cannot simply claim ownership of the lock, since we don't know if the next waiter saw
-+ // the biased state
-+ owner.setStateRelease(LockWaiter.STATE_BIASED);
-+ LockSupport.unpark(next.owner);
-+
-+ acquireObj = new LockWaiter(currThread, 1, null);
-+ // fall through to slower lock logic
++ if (parents == null) {
++ parents = new ReferenceOpenHashSet<>();
++ }
++
++ if (parents.add(dependency)) {
++ // added a dependency, so we need to add as a child to the dependency
++ if (dependency.children == null) {
++ dependency.children = new ArrayList<>();
++ }
++ dependency.children.add(node);
+ }
-+ } else {
-+ // we already have the lock
-+ return;
+ }
-+ } else {
-+ acquireObj = new LockWaiter(currThread, 1, null);
-+ if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-+ // we may be able to quickly acquire the lock
-+ if (owner.getNextVolatile() == null && null == owner.compareAndExchangeNextVolatile(null, acquireObj)) {
-+ if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-+ this.setOwnerRelease(acquireObj);
-+ return;
++ }
++
++ if (parents == null) {
++ // no dependencies, so we can just return immediately
++ return node;
++ } // else: we need to lock
++
++ node.parents = parents;
++ }
++
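++ // wait until the last parent to unlock flips our flag and unparks us (see unlock())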
++ while (!node.unlocked) {
++ LockSupport.park(node);
++ }
++
++ return node;
++ }
++
++ public void unlock(final Node node) {
++ List<Node> toUnpark = null;
++
++ final int x = node.x;
++ final int z = node.z;
++ final int radius = node.radius;
++
++ final int minX = (x - radius) >> this.coordinateShift;
++ final int minZ = (z - radius) >> this.coordinateShift;
++ final int maxX = (x + radius) >> this.coordinateShift;
++ final int maxZ = (z + radius) >> this.coordinateShift;
++
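++ // wake children whose last remaining dependency is this node, then release this node's claim on the area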
++ synchronized (this) {
++ final List<Node> children = node.children;
++ if (children != null) {
++ // try to unlock children
++ for (int i = 0, len = children.size(); i < len; ++i) {
++ final Node child = children.get(i);
++ if (!child.parents.remove(node)) {
++ throw new IllegalStateException();
++ }
++ if (child.parents.isEmpty()) {
++ // we can unlock, as it now has no dependencies in front
++ child.parents = null;
++ if (toUnpark == null) {
++ toUnpark = new ArrayList<>();
++ toUnpark.add(child);
+ } else {
-+ needAppend = false;
-+ // we failed to acquire, but we can block instead - we did CAS to the next immediate owner
++ toUnpark.add(child);
+ }
+ }
-+ } // else: fall through to append and wait code
++ }
++ }
++
++ // remove node from dependency map
++ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
++ for (int currX = minX; currX <= maxX; ++currX) {
++ // note: we only remove if we match, as a mismatch indicates a child node which of course has not
++ // yet been unlocked
++ this.nodesByPosition.remove(key(currX, currZ), node);
++ }
+ }
+ }
+
-+ if (needAppend) {
-+ this.appendWaiter(acquireObj); // append to end of waiters
-+ }
-+
-+ // failed to fast acquire, so now we may need to block
-+ final int spinAttempts = 10;
-+ for (int i = 0; i < spinAttempts; ++i) {
-+ for (int k = 0; k <= i; ++i) {
-+ Thread.onSpinWait();
-+ }
-+ if (this.tryAcquireBiased(acquireObj)) {
-+ // acquired
-+ return;
-+ }
-+ }
-+
-+ // slow acquire
-+ while (!this.tryAcquireBiased(acquireObj)) {
-+ LockSupport.park(this);
-+ }
-+ }
-+
-+ /**
-+ * {@inheritDoc}
-+ * @throws IllegalMonitorStateException If the current thread does not own the lock.
-+ */
-+ @Override
-+ public void unlock() {
-+ final LockWaiter owner = this.getOwnerVolatile();
-+
-+ final int oldState;
-+ if (owner.owner != Thread.currentThread() || (oldState = owner.getStatePlain()) <= 0) {
-+ throw new IllegalMonitorStateException();
-+ }
-+
-+ owner.setStateRelease(oldState - 1);
-+
-+ if (oldState != 1) {
++ if (toUnpark == null) {
+ return;
+ }
+
-+ final LockWaiter next = owner.getNextVolatile();
++ // we move the unpark / unlock logic here because we want to avoid performing work while holding the lock
+
-+ if (next == null) {
-+ // we can leave the lock in biased state, which will save a CAS
-+ return;
++ for (int i = 0, len = toUnpark.size(); i < len; ++i) {
++ final Node toUnlock = toUnpark.get(i);
++ toUnlock.unlocked = true; // must be volatile and before unpark()
++ LockSupport.unpark(toUnlock.thread);
+ }
-+
-+ // we have TWO cases:
-+ // waiter saw the lock in biased state
-+ // waiter did not see the lock in biased state
-+ // the problem is that if the waiter saw the lock in the biased state, then it now owns the lock. but if it did not,
-+ // then we still own the lock.
-+
-+ // However, by unparking always, the waiter will try to acquire the biased lock from us.
-+ LockSupport.unpark(next.owner);
+ }
+
-+ @Override
-+ public void lockInterruptibly() throws InterruptedException {
-+ throw new UnsupportedOperationException();
-+ }
++ public static final class Node {
+
-+ @Override
-+ public boolean tryLock() {
-+ throw new UnsupportedOperationException();
-+ }
++ public final int x;
++ public final int z;
++ public final int radius;
++ public final Thread thread;
+
-+ @Override
-+ public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
-+ throw new UnsupportedOperationException();
-+ }
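++ // guarded by the AreaLock instance's monitor: the nodes this node blocks (children) and the nodes blocking it (parents)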
++ private List<Node> children;
++ private ReferenceOpenHashSet<Node> parents;
+
-+ @Override
-+ public Condition newCondition() {
-+ throw new UnsupportedOperationException();
-+ }
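++ // written by the unlocking thread; volatile so the parked owner can observe it without taking the monitor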
++ private volatile boolean unlocked;
+
-+ static final class LockWaiter {
-+
-+ static final int STATE_BIASED = 0;
-+
-+ private volatile LockWaiter next;
-+ private volatile int state;
-+ private Thread owner;
-+
-+ private static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "next", LockWaiter.class);
-+ private static final VarHandle STATE_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "state", int.class);
-+
-+
-+ private LockWaiter compareAndExchangeNextVolatile(final LockWaiter expect, final LockWaiter update) {
-+ return (LockWaiter)NEXT_HANDLE.compareAndExchange((LockWaiter)this, expect, update);
-+ }
-+
-+ private void setNextPlain(final LockWaiter next) {
-+ NEXT_HANDLE.set((LockWaiter)this, next);
-+ }
-+
-+ private LockWaiter getNextOpaque() {
-+ return (LockWaiter)NEXT_HANDLE.getOpaque((LockWaiter)this);
-+ }
-+
-+ private LockWaiter getNextVolatile() {
-+ return (LockWaiter)NEXT_HANDLE.getVolatile((LockWaiter)this);
-+ }
-+
-+
-+
-+ private int getStatePlain() {
-+ return (int)STATE_HANDLE.get((LockWaiter)this);
-+ }
-+
-+ private int getStateVolatile() {
-+ return (int)STATE_HANDLE.getVolatile((LockWaiter)this);
-+ }
-+
-+ private void setStatePlain(final int value) {
-+ STATE_HANDLE.set((LockWaiter)this, value);
-+ }
-+
-+ private void setStateRelease(final int value) {
-+ STATE_HANDLE.setRelease((LockWaiter)this, value);
-+ }
-+
-+ public LockWaiter(final Thread owner, final int initialState, final LockWaiter next) {
-+ this.owner = owner;
-+ this.setStatePlain(initialState);
-+ this.setNextPlain(next);
-+ }
-+
-+ public int incrementState() {
-+ final int old = this.getStatePlain();
-+ // Technically, we DO NOT need release for old != BIASED. But we care about optimising only for x86,
-+ // which is a simple MOV for everything but volatile.
-+ this.setStateRelease(old + 1);
-+ return old;
++ public Node(final int x, final int z, final int radius, final Thread thread) {
++ this.x = x;
++ this.z = z;
++ this.radius = radius;
++ this.thread = thread;
+ }
+ }
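++ /*
++ * Usage sketch (hypothetical caller; assumes a coordinateShift of 4, i.e. 16-block sections):
++ *
++ * final AreaLock areaLock = new AreaLock(4);
++ * final AreaLock.Node node = areaLock.lock(blockX, blockZ, 16);
++ * try {
++ * // mutate state inside the locked area
++ * } finally {
++ * areaLock.unlock(node);
++ * }
++ */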
+}
@@ -2279,7 +1977,7 @@ index 61c170555c8854b102c640b0b6a615f9f732edbf..515cc130a411f218ed20628eb918be9d
}
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b84b8eac0 100644
+index c6d20bc2f0eab737338db6b88dacb63f0decb66c..04d2c42e69ed8ab27d21d3bf038de54675e5a148 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
@@ -3,7 +3,6 @@ package io.papermc.paper.chunk.system.scheduling;
@@ -2326,7 +2024,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
public final class ChunkHolderManager {
private static final Logger LOGGER = LogUtils.getClassLogger();
-@@ -63,40 +69,201 @@ public final class ChunkHolderManager {
+@@ -63,40 +69,198 @@ public final class ChunkHolderManager {
public static final int ENTITY_TICKING_TICKET_LEVEL = 31;
public static final int MAX_TICKET_LEVEL = ChunkMap.MAX_CHUNK_DISTANCE; // inclusive
@@ -2334,6 +2032,11 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
+ // Folia start - region threading
+ private static final long NO_TIMEOUT_MARKER = Long.MIN_VALUE;
+ private static final long PROBE_MARKER = Long.MIN_VALUE + 1;
++ // special region threading fields
++ // this field contains chunk holders that were created in addTicketAtLevel
++ // because the chunk holders were created without a reliable unload hook (i.e. creation for entity/poi loading,
++ // which always checks for unload after their tasks finish), we need to do that ourselves later
++ private final ReferenceOpenHashSet<NewChunkHolder> specialCaseUnload = new ReferenceOpenHashSet<>();
+ // Folia end - region threading
- final ReentrantLock ticketLock = new ReentrantLock();
@@ -2355,6 +2058,15 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
- return 0;
+ // Folia start - region threading
+ public static final class HolderManagerRegionData {
++ /*
++ * This region data is a bit of a mess, because it is part global state and part region state.
++ * Typically for region state we do not need to worry about threading concerns because it is only
++ * accessed by the current region when ticking. But since this contains state
++ * (tickets and removeTickToChunkExpireTicketCount) that can be written to by any thread holding the
++ * ticket lock, the merge logic is complicated as merging only holds the region lock. So, Folia has modified
++ * the add and remove ticket functions to acquire the region lock if the current region does not own the target
++ * position.
++ */
+ private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
+ private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
+ if (c1 == c2) {
@@ -2382,12 +2094,6 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
+ // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
+ private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
+
-+ // special region threading fields
-+ // this field contains chunk holders that were created in addTicketAtLevel
-+ // because the chunk holders were created without a reliable unload hook (i.e creation for entity/poi loading,
-+ // which always check for unload after their tasks finish) we need to do that ourselves later
-+ private final ReferenceOpenHashSet specialCaseUnload = new ReferenceOpenHashSet<>();
-+
+ public void merge(final HolderManagerRegionData into, final long tickOffset) {
+ // Order doesn't really matter for the pending full update...
+ into.pendingFullLoadUpdate.addAll(this.pendingFullLoadUpdate);
@@ -2428,9 +2134,6 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
+ }
+ );
+ }
-+
-+ // add them all
-+ into.specialCaseUnload.addAll(this.specialCaseUnload);
+ }
+
+ public void split(final int chunkToRegionShift, final Long2ReferenceOpenHashMap<HolderManagerRegionData> regionToData,
@@ -2488,59 +2191,51 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
+ }).put(chunkKey, count);
+ }
+ }
-+
-+ for (final NewChunkHolder special : this.specialCaseUnload) {
-+ final int regionCoordinateX = CoordinateUtils.getChunkX(special.chunkX) >> chunkToRegionShift;
-+ final int regionCoordinateZ = CoordinateUtils.getChunkZ(special.chunkZ) >> chunkToRegionShift;
-+
-+ // can never be null, since this chunk holder is loaded
-+ regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ)).specialCaseUnload.add(special);
-+ }
- }
++ }
+ }
+
+ private ChunkHolderManager.HolderManagerRegionData getCurrentRegionData() {
+ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region =
+ TickRegionScheduler.getCurrentRegion();
-
-- final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
++
+ if (region == null) {
+ return null;
+ }
-
-- if (saveTickCompare != 0) {
-- return saveTickCompare;
++
+ if (this.world != null && this.world != region.getData().world) {
+ throw new IllegalStateException("World check failed: expected world: " + this.world.getWorld().getKey() + ", region world: " + region.getData().world.getWorld().getKey());
}
-- final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
-- final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
+- final int saveTickCompare = Long.compare(c1.lastAutoSave, c2.lastAutoSave);
+ return region.getData().getHolderManagerRegionData();
+ }
-
-- if (coord1 == coord2) {
-- throw new IllegalStateException("Duplicate chunkholder in auto save queue");
++
+ // MUST hold ticket lock
+ private ChunkHolderManager.HolderManagerRegionData getDataFor(final long key) {
+ return this.getDataFor(CoordinateUtils.getChunkX(key), CoordinateUtils.getChunkZ(key));
+ }
-+
+
+- if (saveTickCompare != 0) {
+- return saveTickCompare;
+ // MUST hold ticket lock
+ private ChunkHolderManager.HolderManagerRegionData getDataFor(final int chunkX, final int chunkZ) {
+ if (!this.ticketLock.isHeldByCurrentThread()) {
+ throw new IllegalStateException("Must hold ticket level lock");
}
-- return Long.compare(coord1, coord2);
-- });
+- final long coord1 = CoordinateUtils.getChunkKey(c1.chunkX, c1.chunkZ);
+- final long coord2 = CoordinateUtils.getChunkKey(c2.chunkX, c2.chunkZ);
+ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> region
+ = this.world.regioniser.getRegionAtUnsynchronised(chunkX, chunkZ);
-+
+
+- if (coord1 == coord2) {
+- throw new IllegalStateException("Duplicate chunkholder in auto save queue");
+ if (region == null) {
+ return null;
-+ }
-+
+ }
+
+- return Long.compare(coord1, coord2);
+- });
+ return region.getData().getHolderManagerRegionData();
+ }
+ // Folia end - region threading
@@ -2548,7 +2243,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
public ChunkHolderManager(final ServerLevel world, final ChunkTaskScheduler taskScheduler) {
this.world = world;
-@@ -129,8 +296,13 @@ public final class ChunkHolderManager {
+@@ -129,8 +293,13 @@ public final class ChunkHolderManager {
}
public void close(final boolean save, final boolean halt) {
@@ -2563,7 +2258,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
LOGGER.info("Waiting 60s for chunk system to halt for world '" + this.world.getWorld().getName() + "'");
if (!this.taskScheduler.halt(true, TimeUnit.SECONDS.toNanos(60L))) {
LOGGER.warn("Failed to halt world generation/loading tasks for world '" + this.world.getWorld().getName() + "'");
-@@ -140,9 +312,10 @@ public final class ChunkHolderManager {
+@@ -140,9 +309,10 @@ public final class ChunkHolderManager {
}
if (save) {
@@ -2575,7 +2270,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
if (this.world.chunkDataControllerNew.hasTasks() || this.world.entityDataControllerNew.hasTasks() || this.world.poiDataControllerNew.hasTasks()) {
RegionFileIOThread.flush();
}
-@@ -163,27 +336,34 @@ public final class ChunkHolderManager {
+@@ -163,27 +333,34 @@ public final class ChunkHolderManager {
} catch (final IOException ex) {
LOGGER.error("Failed to close poi regionfile cache for world '" + this.world.getWorld().getName() + "'", ex);
}
@@ -2617,7 +2312,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
holder.lastAutoSave = currentTick;
if (holder.save(false, false) != null) {
-@@ -197,15 +377,20 @@ public final class ChunkHolderManager {
+@@ -197,15 +374,20 @@ public final class ChunkHolderManager {
for (final NewChunkHolder holder : reschedule) {
if (holder.getChunkStatus().isOrAfter(ChunkHolder.FullChunkStatus.BORDER)) {
@@ -2640,7 +2335,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
LOGGER.info("Saving all chunkholders for world '" + this.world.getWorld().getName() + "'");
}
-@@ -213,7 +398,7 @@ public final class ChunkHolderManager {
+@@ -213,7 +395,7 @@ public final class ChunkHolderManager {
int saved = 0;
@@ -2649,7 +2344,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
long lastLog = start;
boolean needsFlush = false;
final int flushInterval = 50;
-@@ -224,6 +409,12 @@ public final class ChunkHolderManager {
+@@ -224,6 +406,12 @@ public final class ChunkHolderManager {
for (int i = 0, len = holders.size(); i < len; ++i) {
final NewChunkHolder holder = holders.get(i);
@@ -2662,7 +2357,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
try {
final NewChunkHolder.SaveStat saveStat = holder.save(shutdown, false);
if (saveStat != null) {
-@@ -256,7 +447,7 @@ public final class ChunkHolderManager {
+@@ -256,7 +444,7 @@ public final class ChunkHolderManager {
}
}
}
@@ -2671,7 +2366,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
RegionFileIOThread.flush();
}
if (logProgress) {
-@@ -290,18 +481,16 @@ public final class ChunkHolderManager {
+@@ -290,18 +478,16 @@ public final class ChunkHolderManager {
}
public boolean hasTickets() {
@@ -2695,7 +2390,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
return tickets != null ? tickets.first().toString() : "no_ticket";
} finally {
-@@ -312,7 +501,17 @@ public final class ChunkHolderManager {
+@@ -312,7 +498,17 @@ public final class ChunkHolderManager {
public Long2ObjectOpenHashMap<SortedArraySet<Ticket<?>>> getTicketsCopy() {
this.ticketLock.lock();
try {
@@ -2714,7 +2409,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
} finally {
this.ticketLock.unlock();
}
-@@ -322,7 +521,11 @@ public final class ChunkHolderManager {
+@@ -322,7 +518,11 @@ public final class ChunkHolderManager {
ImmutableList.Builder<Plugin> ret;
this.ticketLock.lock();
try {
@@ -2727,27 +2422,37 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
if (tickets == null) {
return Collections.emptyList();
-@@ -377,10 +580,27 @@ public final class ChunkHolderManager {
+@@ -375,12 +575,37 @@ public final class ChunkHolderManager {
+ return false;
+ }
++ // Folia start - region threading
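++ // take the region read lock only when the target chunk is outside the region this thread is currently ticking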
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> currRegion = TickRegionScheduler.getCurrentRegion();
++ final boolean lock = currRegion == null || this.world.regioniser.getRegionAtUnsynchronised(
++ CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk)
++ ) != currRegion;
++ // Folia end - region threading
++
this.ticketLock.lock();
try {
- final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : this.currentTick + removeDelay;
+ // Folia start - region threading
+ NewChunkHolder holder = this.chunkHolders.get(chunk);
-+ final boolean addToSpecial = holder == null;
-+ if (addToSpecial) {
++ if (holder == null) {
+ // we need to guarantee that a chunk holder exists for each ticket
+ // this must be executed before retrieving the holder manager data for a target chunk, to ensure the
+ // region will exist
+ this.chunkHolders.put(chunk, holder = this.createChunkHolder(chunk));
++ this.specialCaseUnload.add(holder);
+ }
+
-+ final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
-+ if (addToSpecial) {
-+ // no guarantee checkUnload is called for this chunk holder - by adding to the special case unload,
-+ // the unload chunks call will perform it
-+ targetData.specialCaseUnload.add(holder);
++ if (lock) {
++ // we just need to prevent merging, so we only need the read lock
++ // additionally, this will prevent deadlock in the remove all tickets function by using the read lock
++ this.world.regioniser.acquireReadLock();
+ }
++ try {
++ final ChunkHolderManager.HolderManagerRegionData targetData = lock ? this.getDataFor(chunk) : currRegion.getData().getHolderManagerRegionData();
+ // Folia end - region threading
+ final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : targetData.currentTick + removeDelay; // Folia - region threading
final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
@@ -2757,7 +2462,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
return SortedArraySet.create(4);
});
-@@ -392,25 +612,25 @@ public final class ChunkHolderManager {
+@@ -392,25 +617,25 @@ public final class ChunkHolderManager {
final long oldRemovalTick = current.removalTick;
if (removeTick != oldRemovalTick) {
if (oldRemovalTick != NO_TIMEOUT_MARKER) {
@@ -2787,13 +2492,41 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
return new Long2IntOpenHashMap();
}).addTo(chunk, 1);
}
-@@ -441,33 +661,37 @@ public final class ChunkHolderManager {
+@@ -421,6 +646,11 @@ public final class ChunkHolderManager {
+ }
+ return current == ticket;
++ } finally { // Folia start - region threading
++ if (lock) {
++ this.world.regioniser.releaseReadLock();
++ }
++ } // Folia end - region threading
+ } finally {
+ this.ticketLock.unlock();
+ }
+@@ -439,35 +669,70 @@ public final class ChunkHolderManager {
+ return false;
+ }
+
++ // Folia start - region threading
++ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> currRegion = TickRegionScheduler.getCurrentRegion();
++ final boolean lock = currRegion == null || this.world.regioniser.getRegionAtUnsynchronised(
++ CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk)
++ ) != currRegion;
++ // Folia end - region threading
++
this.ticketLock.lock();
try {
- final SortedArraySet<Ticket<?>> ticketsAtChunk = this.tickets.get(chunk);
+ // Folia start - region threading
-+ final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
++ if (lock) {
++ // we just need to prevent merging, so we only need the read lock
++ // additionally, this will prevent deadlock in the remove all tickets function by using the read lock
++ this.world.regioniser.acquireReadLock();
++ }
++ try {
++ final ChunkHolderManager.HolderManagerRegionData targetData = lock ? this.getDataFor(chunk) : currRegion.getData().getHolderManagerRegionData();
++ // Folia end - region threading
+
++ final SortedArraySet<Ticket<?>> ticketsAtChunk = targetData == null ? null : targetData.tickets.get(chunk);
+ // Folia end - region threading
@@ -2809,12 +2542,30 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
return false;
}
++ int newLevel = getTicketLevelAt(ticketsAtChunk); // Folia - region threading - moved up from below
++ // Folia start - region threading
++ // we should not change the ticket levels while the target region may be ticking
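++ // instead, hold the level with an UNKNOWN ticket that expires on the target region's own tick counter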
++ if (newLevel > level) {
++ final long unknownRemoveTick = targetData.currentTick + Math.max(0, TicketType.UNKNOWN.timeout);
++ final Ticket<ChunkPos> unknownTicket = new Ticket<>(TicketType.UNKNOWN, level, new ChunkPos(chunk), unknownRemoveTick);
++ if (ticketsAtChunk.add(unknownTicket)) {
++ targetData.removeTickToChunkExpireTicketCount.computeIfAbsent(unknownRemoveTick, (final long keyInMap) -> {
++ return new Long2IntOpenHashMap();
++ }).addTo(chunk, 1);
++ } else {
++ throw new IllegalStateException("Should have been able to add " + unknownTicket + " to " + ticketsAtChunk);
++ }
++ newLevel = level;
++ }
++ // Folia end - region threading
++
if (ticketsAtChunk.isEmpty()) {
- this.tickets.remove(chunk);
+ targetData.tickets.remove(chunk); // Folia - region threading
}
- final int newLevel = getTicketLevelAt(ticketsAtChunk);
+- final int newLevel = getTicketLevelAt(ticketsAtChunk);
++ // Folia - region threading - move up
final long removeTick = ticket.removalTick;
if (removeTick != NO_TIMEOUT_MARKER) {
@@ -2830,21 +2581,19 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
}
}
}
-@@ -476,6 +700,13 @@ public final class ChunkHolderManager {
- this.updateTicketLevel(chunk, newLevel);
+@@ -477,6 +742,11 @@ public final class ChunkHolderManager {
}
-+ // Folia start - region threading
-+ // we should not change the ticket levels while the target region may be ticking
-+ if (newLevel > level) {
-+ this.addTicketAtLevel(TicketType.UNKNOWN, chunk, level, new ChunkPos(chunk));
-+ }
-+ // Folia end - region threading
-+
return true;
++ } finally { // Folia start - region threading
++ if (lock) {
++ this.world.regioniser.releaseReadLock();
++ }
++ } // Folia end - region threading
} finally {
this.ticketLock.unlock();
-@@ -516,24 +747,33 @@ public final class ChunkHolderManager {
+ }
+@@ -516,24 +786,33 @@ public final class ChunkHolderManager {
this.ticketLock.lock();
try {
@@ -2885,7 +2634,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
if (toRemove == null) {
return;
-@@ -546,10 +786,10 @@ public final class ChunkHolderManager {
+@@ -546,10 +825,10 @@ public final class ChunkHolderManager {
for (final LongIterator iterator = toRemove.keySet().longIterator(); iterator.hasNext();) {
final long chunk = iterator.nextLong();
@@ -2898,7 +2647,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
this.ticketLevelPropagator.removeSource(chunk);
} else {
this.ticketLevelPropagator.setSource(chunk, convertBetweenTicketLevels(tickets.first().getTicketLevel()));
-@@ -798,30 +1038,62 @@ public final class ChunkHolderManager {
+@@ -798,30 +1077,62 @@ public final class ChunkHolderManager {
if (changedFullStatus.isEmpty()) {
return;
}
@@ -2974,7 +2723,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
this.chunkHolders.remove(CoordinateUtils.getChunkKey(holder.chunkX, holder.chunkZ));
}
-@@ -839,23 +1111,42 @@ public final class ChunkHolderManager {
+@@ -839,6 +1150,8 @@ public final class ChunkHolderManager {
throw new IllegalStateException("Cannot hold scheduling lock while calling processUnloads");
}
@@ -2983,18 +2732,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
final List unloadQueue;
final List scheduleList = new ArrayList<>();
this.ticketLock.lock();
- try {
- this.taskScheduler.schedulingLock.lock();
- try {
-+ // Folia start - region threading
-+ for (final NewChunkHolder special : currentData.specialCaseUnload) {
-+ special.checkUnload();
-+ }
-+ currentData.specialCaseUnload.clear();
-+ // Folia end - region threading
- if (this.unloadQueue.isEmpty()) {
- return;
- }
+@@ -851,11 +1164,22 @@ public final class ChunkHolderManager {
// in order to ensure all chunks in the unload queue do not have a pending ticket level update,
// process them now
this.processTicketUpdates(false, false, scheduleList);
@@ -3021,7 +2759,25 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
if (chunkHolder.isSafeToUnload() != null) {
LOGGER.error("Chunkholder " + chunkHolder + " is not safe to unload but is inside the unload queue?");
continue;
-@@ -1193,7 +1484,12 @@ public final class ChunkHolderManager {
+@@ -1168,6 +1492,17 @@ public final class ChunkHolderManager {
+ }
+
+ this.ticketLevelUpdates.clear();
++
++ // Folia start - region threading
++ // it is possible that a special case new chunk holder had its ticket removed before it was propagated,
++ // which means checkUnload was never invoked. By checking unload here, we ensure that either the
++ // ticket level was propagated (in which case, a later depropagation would check again) or that
++ // we called checkUnload for it.
++ for (final NewChunkHolder special : this.specialCaseUnload) {
++ special.checkUnload();
++ }
++ this.specialCaseUnload.clear();
++ // Folia end - region threading
+ }
+ }
+ } finally {
+@@ -1193,7 +1528,12 @@ public final class ChunkHolderManager {
// only call on tick thread
protected final boolean processPendingFullUpdate() {
@@ -3035,7 +2791,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
boolean ret = false;
-@@ -1204,9 +1500,7 @@ public final class ChunkHolderManager {
+@@ -1204,9 +1544,7 @@ public final class ChunkHolderManager {
ret |= holder.handleFullStatusChange(changedFullStatus);
if (!changedFullStatus.isEmpty()) {
@@ -3046,7 +2802,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
changedFullStatus.clear();
}
}
-@@ -1256,7 +1550,7 @@ public final class ChunkHolderManager {
+@@ -1256,7 +1594,7 @@ public final class ChunkHolderManager {
private JsonObject getDebugJsonNoLock() {
final JsonObject ret = new JsonObject();
@@ -3055,7 +2811,7 @@ index c6d20bc2f0eab737338db6b88dacb63f0decb66c..32b88d7902e877e1cce0b7635cbfa67b
final JsonArray unloadQueue = new JsonArray();
ret.add("unload_queue", unloadQueue);
-@@ -1275,60 +1569,73 @@ public final class ChunkHolderManager {
+@@ -1275,60 +1613,73 @@ public final class ChunkHolderManager {
holders.add(holder.getDebugJson());
}
@@ -6163,10 +5919,10 @@ index 0000000000000000000000000000000000000000..84b4ff07735fb84e28ee8966ffdedb1b
+}
diff --git a/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
new file mode 100644
-index 0000000000000000000000000000000000000000..f6e41c466ba2501f82fd7916742c5fc045ddf828
+index 0000000000000000000000000000000000000000..2334e62953a4d0a415c4c1fe653b6da063119868
--- /dev/null
+++ b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
-@@ -0,0 +1,1203 @@
+@@ -0,0 +1,1211 @@
+package io.papermc.paper.threadedregions;
+
+import ca.spottedleaf.concurrentutil.map.SWMRLong2ObjectHashTable;
@@ -6299,6 +6055,14 @@ index 0000000000000000000000000000000000000000..f6e41c466ba2501f82fd7916742c5fc0
+ }
+ */
+
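++ // holding the read lock prevents a region merge/split (which requires the write lock below) without blocking other readers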
++ public void acquireReadLock() {
++ this.regionLock.readLock();
++ }
++
++ public void releaseReadLock() {
++ this.regionLock.tryUnlockRead();
++ }
++
+ private void acquireWriteLock() {
+ final Thread currentThread = Thread.currentThread();
+ if (this.writeLockOwner == currentThread) {
diff --git a/patches/server/0005-Increase-parallelism-for-neighbour-writing-chunk-sta.patch b/patches/server/0005-Increase-parallelism-for-neighbour-writing-chunk-sta.patch
index 93b975a..62981eb 100644
--- a/patches/server/0005-Increase-parallelism-for-neighbour-writing-chunk-sta.patch
+++ b/patches/server/0005-Increase-parallelism-for-neighbour-writing-chunk-sta.patch
@@ -196,10 +196,10 @@ index 0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6..36e93fefdfbebddce4c153974c7cd81a
final int chunkX = CoordinateUtils.getChunkX(coordinate);
final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-index 32b88d7902e877e1cce0b7635cbfa67b84b8eac0..e787c54b8f4be6923370a704d1c414f5f3274bae 100644
+index 04d2c42e69ed8ab27d21d3bf038de54675e5a148..bb5e5b9d48cb6d459119f66955017cced5af501c 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-@@ -1306,17 +1306,23 @@ public final class ChunkHolderManager {
+@@ -1339,17 +1339,23 @@ public final class ChunkHolderManager {
}
public Boolean tryDrainTicketUpdates() {
@@ -341,10 +341,10 @@ index 73ce0909bd89244835a0d0f2030a25871461f1e0..ecc366a4176b2efadc46aa91aa21621f
@Override
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
new file mode 100644
-index 0000000000000000000000000000000000000000..6648f8cc5c1788ec02a3adbb68cf126372017dd3
+index 0000000000000000000000000000000000000000..3272f73013ea7d4efdd0ae2903925cc543be7075
--- /dev/null
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-@@ -0,0 +1,664 @@
+@@ -0,0 +1,668 @@
+package io.papermc.paper.chunk.system.scheduling.queue;
+
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
@@ -698,8 +698,6 @@ index 0000000000000000000000000000000000000000..6648f8cc5c1788ec02a3adbb68cf1263
+ return null;
+ }
+
-+ ++this.currentlyExecuting;
-+
+ // firstAwaiting compared to firstInfinite
+ final int compare;
+
@@ -714,10 +712,16 @@ index 0000000000000000000000000000000000000000..6648f8cc5c1788ec02a3adbb68cf1263
+ }
+
+ if (compare >= 0) {
++ if (this.currentlyExecuting != 0) {
++ // don't queue infinite task while other tasks are executing in parallel
++ return null;
++ }
++ ++this.currentlyExecuting;
+ this.pollInfinite();
+ this.isInfiniteRadiusScheduled = true;
+ return firstInfinite.task.pushTask(this.executor);
+ } else {
++ ++this.currentlyExecuting;
+ this.pollAwaiting();
+ return firstAwaiting.task.pushTask(this.executor);
+ }
@@ -771,7 +775,7 @@ index 0000000000000000000000000000000000000000..6648f8cc5c1788ec02a3adbb68cf1263
+ }
+
+ private boolean isFiniteRadius() {
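++ // a radius of 0 (just the center chunk) is finite; only a negative radius marks an infinite-radius task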
-+ return this.radius > 0;
++ return this.radius >= 0;
+ }
+
+ private PrioritisedExecutor.PrioritisedTask pushTask(final PrioritisedExecutor executor) {
diff --git a/patches/server/0010-fixup-Threaded-Regions.patch b/patches/server/0010-fixup-Threaded-Regions.patch
deleted file mode 100644
index 2883d0f..0000000
--- a/patches/server/0010-fixup-Threaded-Regions.patch
+++ /dev/null
@@ -1,128 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Spottedleaf
-Date: Sat, 11 Mar 2023 23:27:32 -0800
-Subject: [PATCH] fixup! Threaded Regions
-
-
-diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-index e787c54b8f4be6923370a704d1c414f5f3274bae..d2386ee333927aedd9235212780fee04630a8510 100644
---- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-@@ -72,6 +72,11 @@ public final class ChunkHolderManager {
- // Folia start - region threading
- private static final long NO_TIMEOUT_MARKER = Long.MIN_VALUE;
- private static final long PROBE_MARKER = Long.MIN_VALUE + 1;
-+ // special region threading fields
-+ // this field contains chunk holders that were created in addTicketAtLevel
-+ // because the chunk holders were created without a reliable unload hook (i.e creation for entity/poi loading,
-+ // which always check for unload after their tasks finish) we need to do that ourselves later
-+ private final ReferenceOpenHashSet<NewChunkHolder> specialCaseUnload = new ReferenceOpenHashSet<>();
- // Folia end - region threading
-
- public final ReentrantLock ticketLock = new ReentrantLock(); // Folia - region threading
-@@ -83,6 +88,13 @@ public final class ChunkHolderManager {
-
- // Folia start - region threading
- public static final class HolderManagerRegionData {
-+ /*
-+ * This region data is a bit of a mess, because it is part global state and part region state.
-+ * Typically for region state we do not need to worry about threading concerns because it is only
-+ * accessed by the current region when ticking. But since this contains state (
-+ * tickets, and removeTickToChunkExpireTicketCount) that can be written to by any thread holding the
-+ * ticket lock, the merge logic is complicated as merging only holds the region lock.
-+ */
- private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
- private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
- if (c1 == c2) {
-@@ -110,12 +122,6 @@ public final class ChunkHolderManager {
- // this is a map of removal tick to a map of chunks and the number of tickets a chunk has that are to expire that tick
- private final Long2ObjectOpenHashMap<Long2IntOpenHashMap> removeTickToChunkExpireTicketCount = new Long2ObjectOpenHashMap<>();
-
-- // special region threading fields
-- // this field contains chunk holders that were created in addTicketAtLevel
-- // because the chunk holders were created without a reliable unload hook (i.e creation for entity/poi loading,
-- // which always check for unload after their tasks finish) we need to do that ourselves later
-- private final ReferenceOpenHashSet<NewChunkHolder> specialCaseUnload = new ReferenceOpenHashSet<>();
--
- public void merge(final HolderManagerRegionData into, final long tickOffset) {
- // Order doesn't really matter for the pending full update...
- into.pendingFullLoadUpdate.addAll(this.pendingFullLoadUpdate);
-@@ -156,9 +162,6 @@ public final class ChunkHolderManager {
- }
- );
- }
--
-- // add them all
-- into.specialCaseUnload.addAll(this.specialCaseUnload);
- }
-
- public void split(final int chunkToRegionShift, final Long2ReferenceOpenHashMap<HolderManagerRegionData> regionToData,
-@@ -216,14 +219,6 @@ public final class ChunkHolderManager {
- }).put(chunkKey, count);
- }
- }
--
-- for (final NewChunkHolder special : this.specialCaseUnload) {
-- final int regionCoordinateX = CoordinateUtils.getChunkX(special.chunkX) >> chunkToRegionShift;
-- final int regionCoordinateZ = CoordinateUtils.getChunkZ(special.chunkZ) >> chunkToRegionShift;
--
-- // can never be null, since this chunk holder is loaded
-- regionToData.get(CoordinateUtils.getChunkKey(regionCoordinateX, regionCoordinateZ)).specialCaseUnload.add(special);
-- }
- }
- }
-
-@@ -582,20 +577,15 @@ public final class ChunkHolderManager {
- try {
- // Folia start - region threading
- NewChunkHolder holder = this.chunkHolders.get(chunk);
-- final boolean addToSpecial = holder == null;
-- if (addToSpecial) {
-+ if (holder == null) {
- // we need to guarantee that a chunk holder exists for each ticket
- // this must be executed before retrieving the holder manager data for a target chunk, to ensure the
- // region will exist
- this.chunkHolders.put(chunk, holder = this.createChunkHolder(chunk));
-+ this.specialCaseUnload.add(holder);
- }
-
- final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
-- if (addToSpecial) {
-- // no guarantee checkUnload is called for this chunk holder - by adding to the special case unload,
-- // the unload chunks call will perform it
-- targetData.specialCaseUnload.add(holder);
-- }
- // Folia end - region threading
- final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : targetData.currentTick + removeDelay; // Folia - region threading
- final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
-@@ -1119,12 +1109,6 @@ public final class ChunkHolderManager {
- try {
- this.taskScheduler.schedulingLock.lock();
- try {
-- // Folia start - region threading
-- for (final NewChunkHolder special : currentData.specialCaseUnload) {
-- special.checkUnload();
-- }
-- currentData.specialCaseUnload.clear();
-- // Folia end - region threading
- if (this.unloadQueue.isEmpty()) {
- return;
- }
-@@ -1465,6 +1449,17 @@ public final class ChunkHolderManager {
- }
-
- this.ticketLevelUpdates.clear();
-+
-+ // Folia start - region threading
-+ // it is possible that a special case new chunk holder had its ticket removed before it was propagated,
-+ // which means checkUnload was never invoked. By checking unload here, we ensure that either the
-+ // ticket level was propagated (in which case, a later depropagation would check again) or that
-+ // we called checkUnload for it.
-+ for (final NewChunkHolder special : this.specialCaseUnload) {
-+ special.checkUnload();
-+ }
-+ this.specialCaseUnload.clear();
-+ // Folia end - region threading
- }
- }
- } finally {
diff --git a/patches/server/0011-fixup-Increase-parallelism-for-neighbour-writing-chu.patch b/patches/server/0011-fixup-Increase-parallelism-for-neighbour-writing-chu.patch
deleted file mode 100644
index cdf563a..0000000
--- a/patches/server/0011-fixup-Increase-parallelism-for-neighbour-writing-chu.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Spottedleaf
-Date: Sun, 12 Mar 2023 00:14:32 -0800
-Subject: [PATCH] fixup! Increase parallelism for neighbour writing chunk
- statuses
-
-
-diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-index 6648f8cc5c1788ec02a3adbb68cf126372017dd3..1c8e05d5dde630fdee01900823d26c293f375abc 100644
---- a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-@@ -351,8 +351,6 @@ public class RadiusAwarePrioritisedExecutor {
- return null;
- }
-
-- ++this.currentlyExecuting;
--
- // firstAwaiting compared to firstInfinite
- final int compare;
-
-@@ -367,10 +365,16 @@ public class RadiusAwarePrioritisedExecutor {
- }
-
- if (compare >= 0) {
-+ if (this.currentlyExecuting != 0) {
-+ // don't queue infinite task while other tasks are executing in parallel
-+ return null;
-+ }
-+ ++this.currentlyExecuting;
- this.pollInfinite();
- this.isInfiniteRadiusScheduled = true;
- return firstInfinite.task.pushTask(this.executor);
- } else {
-+ ++this.currentlyExecuting;
- this.pollAwaiting();
- return firstAwaiting.task.pushTask(this.executor);
- }
diff --git a/patches/server/0012-fixup-Increase-parallelism-for-neighbour-writing-chu.patch b/patches/server/0012-fixup-Increase-parallelism-for-neighbour-writing-chu.patch
deleted file mode 100644
index 050e5fc..0000000
--- a/patches/server/0012-fixup-Increase-parallelism-for-neighbour-writing-chu.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Spottedleaf
-Date: Sun, 12 Mar 2023 00:51:29 -0800
-Subject: [PATCH] fixup! Increase parallelism for neighbour writing chunk
- statuses
-
-
-diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-index 1c8e05d5dde630fdee01900823d26c293f375abc..3272f73013ea7d4efdd0ae2903925cc543be7075 100644
---- a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
-@@ -428,7 +428,7 @@ public class RadiusAwarePrioritisedExecutor {
- }
-
- private boolean isFiniteRadius() {
-- return this.radius > 0;
-+ return this.radius >= 0;
- }
-
- private PrioritisedExecutor.PrioritisedTask pushTask(final PrioritisedExecutor executor) {
diff --git a/patches/server/0013-fixup-Threaded-Regions.patch b/patches/server/0013-fixup-Threaded-Regions.patch
deleted file mode 100644
index 277d5e6..0000000
--- a/patches/server/0013-fixup-Threaded-Regions.patch
+++ /dev/null
@@ -1,763 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Spottedleaf
-Date: Sun, 12 Mar 2023 15:00:00 -0700
-Subject: [PATCH] fixup! Threaded Regions
-
-
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java
-new file mode 100644
-index 0000000000000000000000000000000000000000..6a155b779914828a0d4199bdfcb0d6fca25e1581
---- /dev/null
-+++ b/src/main/java/ca/spottedleaf/concurrentutil/lock/AreaLock.java
-@@ -0,0 +1,146 @@
-+package ca.spottedleaf.concurrentutil.lock;
-+
-+import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
-+import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
-+import java.util.ArrayList;
-+import java.util.List;
-+import java.util.concurrent.locks.LockSupport;
-+
-+public final class AreaLock {
-+
-+ private final int coordinateShift;
-+
-+ private final Long2ReferenceOpenHashMap<Node> nodesByPosition = new Long2ReferenceOpenHashMap<>(1024, 0.10f);
-+
-+ public AreaLock(final int coordinateShift) {
-+ this.coordinateShift = coordinateShift;
-+ }
-+
-+ private static long key(final int x, final int z) {
-+ return ((long)z << 32) | (x & 0xFFFFFFFFL);
-+ }
-+
-+ public Node lock(final int x, final int z, final int radius) {
-+ final Thread thread = Thread.currentThread();
-+ final int minX = (x - radius) >> this.coordinateShift;
-+ final int minZ = (z - radius) >> this.coordinateShift;
-+ final int maxX = (x + radius) >> this.coordinateShift;
-+ final int maxZ = (z + radius) >> this.coordinateShift;
-+
-+ final Node node = new Node(x, z, radius, thread);
-+
-+ synchronized (this) {
-+ ReferenceOpenHashSet<Node> parents = null;
-+ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
-+ for (int currX = minX; currX <= maxX; ++currX) {
-+ final Node dependency = this.nodesByPosition.put(key(currX, currZ), node);
-+ if (dependency == null) {
-+ continue;
-+ }
-+
-+ if (parents == null) {
-+ parents = new ReferenceOpenHashSet<>();
-+ }
-+
-+ if (parents.add(dependency)) {
-+ // added a dependency, so we need to add as a child to the dependency
-+ if (dependency.children == null) {
-+ dependency.children = new ArrayList<>();
-+ }
-+ dependency.children.add(node);
-+ }
-+ }
-+ }
-+
-+ if (parents == null) {
-+ // no dependencies, so we can just return immediately
-+ return node;
-+ } // else: we need to lock
-+
-+ node.parents = parents;
-+ }
-+
-+ while (!node.unlocked) {
-+ LockSupport.park(node);
-+ }
-+
-+ return node;
-+ }
-+
-+ public void unlock(final Node node) {
-+ List<Node> toUnpark = null;
-+
-+ final int x = node.x;
-+ final int z = node.z;
-+ final int radius = node.radius;
-+
-+ final int minX = (x - radius) >> this.coordinateShift;
-+ final int minZ = (z - radius) >> this.coordinateShift;
-+ final int maxX = (x + radius) >> this.coordinateShift;
-+ final int maxZ = (z + radius) >> this.coordinateShift;
-+
-+ synchronized (this) {
-+ final List<Node> children = node.children;
-+ if (children != null) {
-+ // try to unlock children
-+ for (int i = 0, len = children.size(); i < len; ++i) {
-+ final Node child = children.get(i);
-+ if (!child.parents.remove(node)) {
-+ throw new IllegalStateException();
-+ }
-+ if (child.parents.isEmpty()) {
-+ // we can unlock, as it now has no dependencies in front
-+ child.parents = null;
-+ if (toUnpark == null) {
-+ toUnpark = new ArrayList<>();
-+ toUnpark.add(child);
-+ } else {
-+ toUnpark.add(child);
-+ }
-+ }
-+ }
-+ }
-+
-+ // remove node from dependency map
-+ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
-+ for (int currX = minX; currX <= maxX; ++currX) {
-+ // note: we only remove if we match, as a mismatch indicates a child node which of course has not
-+ // yet been unlocked
-+ this.nodesByPosition.remove(key(currX, currZ), node);
-+ }
-+ }
-+ }
-+
-+ if (toUnpark == null) {
-+ return;
-+ }
-+
-+ // we move the unpark / unlock logic here because we want to avoid performing work while holding the lock
-+
-+ for (int i = 0, len = toUnpark.size(); i < len; ++i) {
-+ final Node toUnlock = toUnpark.get(i);
-+ toUnlock.unlocked = true; // must be volatile and before unpark()
-+ LockSupport.unpark(toUnlock.thread);
-+ }
-+ }
-+
-+ public static final class Node {
-+
-+ public final int x;
-+ public final int z;
-+ public final int radius;
-+ public final Thread thread;
-+
-+ private List<Node> children;
-+ private ReferenceOpenHashSet<Node> parents;
-+
-+ private volatile boolean unlocked;
-+
-+ public Node(final int x, final int z, final int radius, final Thread thread) {
-+ this.x = x;
-+ this.z = z;
-+ this.radius = radius;
-+ this.thread = thread;
-+ }
-+ }
-+}
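To make the protocol above concrete, here is a hedged usage sketch of the AreaLock just shown: a coordinate shift of 4 buckets positions into 16x16 sections, and the Node returned by lock() is the caller's handle for unlock(). The coordinates and the work inside the critical section are invented for illustration:

    // Illustrative usage of the AreaLock defined above.
    final AreaLock areaLock = new AreaLock(4); // sections of 16x16 positions

    // Blocks until no earlier holder overlaps the square of radius 8
    // centred on (100, -32).
    final AreaLock.Node handle = areaLock.lock(100, -32, 8);
    try {
        // ... exclusive access to every section intersecting that square ...
    } finally {
        areaLock.unlock(handle); // unparks waiters left with no dependencies
    }

Note the asymmetry the implementation exploits: lock() may park, but unlock() defers all unparking until after the synchronized block, so no thread is woken while the monitor is held.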
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
-deleted file mode 100644
-index 9df9881396f4a69b51acaae562b12b8ce0a48443..0000000000000000000000000000000000000000
---- a/src/main/java/ca/spottedleaf/concurrentutil/lock/ImproveReentrantLock.java
-+++ /dev/null
-@@ -1,139 +0,0 @@
--package ca.spottedleaf.concurrentutil.lock;
--
--import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
--import java.lang.invoke.VarHandle;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.locks.AbstractQueuedSynchronizer;
--import java.util.concurrent.locks.Condition;
--import java.util.concurrent.locks.Lock;
--
--/**
-- * Implementation of {@link Lock} that should outperform {@link java.util.concurrent.locks.ReentrantLock}.
-- * The lock is considered a non-fair lock, as specified by {@link java.util.concurrent.locks.ReentrantLock},
-- * and additionally does not support the creation of Conditions.
-- *
-- *
-- * Specifically, this implementation is careful to avoid synchronisation penalties when multi-acquiring and
-- * multi-releasing locks from the same thread, and additionally avoids unnecessary synchronisation penalties
-- * when releasing the lock.
-- *
-- */
--public class ImproveReentrantLock implements Lock {
--
-- private final InternalLock lock = new InternalLock();
--
-- private static final class InternalLock extends AbstractQueuedSynchronizer {
--
-- private volatile Thread owner;
-- private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(InternalLock.class, "owner", Thread.class);
-- private int count;
--
-- private Thread getOwnerPlain() {
-- return (Thread)OWNER_HANDLE.get(this);
-- }
--
-- private Thread getOwnerVolatile() {
-- return (Thread)OWNER_HANDLE.getVolatile(this);
-- }
--
-- private void setOwnerRelease(final Thread to) {
-- OWNER_HANDLE.setRelease(this, to);
-- }
--
-- private void setOwnerVolatile(final Thread to) {
-- OWNER_HANDLE.setVolatile(this, to);
-- }
--
-- private Thread compareAndExchangeOwnerVolatile(final Thread expect, final Thread update) {
-- return (Thread)OWNER_HANDLE.compareAndExchange(this, expect, update);
-- }
--
-- @Override
-- protected final boolean tryAcquire(int acquires) {
-- final Thread current = Thread.currentThread();
-- final Thread owner = this.getOwnerVolatile();
--
-- // When trying to blind acquire the lock, using just compare and exchange is faster
-- // than reading the owner field first - but comes at the cost of performing the compare and exchange
-- // even if the current thread owns the lock
-- if ((owner == null && null == this.compareAndExchangeOwnerVolatile(null, current)) || owner == current) {
-- this.count += acquires;
-- return true;
-- }
--
-- return false;
-- }
--
-- @Override
-- protected final boolean tryRelease(int releases) {
-- if (this.getOwnerPlain() == Thread.currentThread()) {
-- final int newCount = this.count -= releases;
-- if (newCount == 0) {
-- // When the caller, which is release(), attempts to signal the next node, it will use volatile
-- // to retrieve the node and status.
-- // Let's say that we have written this field null as release, and then checked for a next node
-- // using volatile and then determined there are no waiters.
-- // While a call to tryAcquire() can fail for another thread since the write may not
-- // publish yet, once the thread adds itself to the waiters list it will synchronise with
-- // the write to the field, since the volatile write to put the thread on the waiter list
-- // will synchronise with the volatile read we did earlier to check for any
-- // waiters.
-- this.setOwnerRelease(null);
-- return true;
-- }
-- return false;
-- }
-- throw new IllegalMonitorStateException();
-- }
-- }
--
-- /**
-- * Returns the thread that owns the lock, or returns {@code null} if there is no such thread.
-- */
-- public Thread getLockOwner() {
-- return this.lock.getOwnerVolatile();
-- }
--
-- /**
-- * Returns whether the current thread owns the lock.
-- */
-- public boolean isHeldByCurrentThread() {
-- return this.lock.getOwnerPlain() == Thread.currentThread();
-- }
--
-- @Override
-- public void lock() {
-- this.lock.acquire(1);
-- }
--
-- @Override
-- public void lockInterruptibly() throws InterruptedException {
-- if (Thread.interrupted()) {
-- throw new InterruptedException();
-- }
-- this.lock.acquireInterruptibly(1);
-- }
--
-- @Override
-- public boolean tryLock() {
-- return this.lock.tryAcquire(1);
-- }
--
-- @Override
-- public boolean tryLock(final long time, final TimeUnit unit) throws InterruptedException {
-- if (Thread.interrupted()) {
-- throw new InterruptedException();
-- }
-- return this.lock.tryAcquire(1) || this.lock.tryAcquireNanos(1, unit.toNanos(time));
-- }
--
-- @Override
-- public void unlock() {
-- this.lock.release(1);
-- }
--
-- @Override
-- public Condition newCondition() {
-- throw new UnsupportedOperationException();
-- }
--}
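Since the deleted class implements java.util.concurrent.locks.Lock, callers use the standard try/finally idiom; the behavioural departures from ReentrantLock are that it is always non-fair and that newCondition() throws. A brief usage sketch, with the guarded work invented for illustration:

    final ImproveReentrantLock lock = new ImproveReentrantLock();

    lock.lock();
    try {
        lock.lock();   // reentrant: the owning thread nests acquires cheaply
        try {
            // ... guarded work ...
        } finally {
            lock.unlock();
        }
    } finally {
        lock.unlock(); // owner is cleared only when the hold count reaches zero
    }

    // Unlike ReentrantLock: lock.newCondition() throws UnsupportedOperationException.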
-diff --git a/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java b/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
-deleted file mode 100644
-index 793a7326141b7d83395585b3d32b0a7e8a6238a7..0000000000000000000000000000000000000000
---- a/src/main/java/ca/spottedleaf/concurrentutil/lock/RBLock.java
-+++ /dev/null
-@@ -1,303 +0,0 @@
--package ca.spottedleaf.concurrentutil.lock;
--
--import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
--import java.lang.invoke.VarHandle;
--import java.util.concurrent.TimeUnit;
--import java.util.concurrent.locks.Condition;
--import java.util.concurrent.locks.Lock;
--import java.util.concurrent.locks.LockSupport;
--
--// ReentrantBiasedLock
--public final class RBLock implements Lock {
--
-- private volatile LockWaiter owner;
-- private static final VarHandle OWNER_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "owner", LockWaiter.class);
--
-- private volatile LockWaiter tail;
-- private static final VarHandle TAIL_HANDLE = ConcurrentUtil.getVarHandle(RBLock.class, "tail", LockWaiter.class);
--
-- public RBLock() {
-- // we can have the initial state as if it was locked by this thread, then unlocked
-- final LockWaiter dummy = new LockWaiter(null, LockWaiter.STATE_BIASED, null);
-- this.setOwnerPlain(dummy);
-- // release ensures correct publishing
-- this.setTailRelease(dummy);
-- }
--
-- private LockWaiter getOwnerVolatile() {
-- return (LockWaiter)OWNER_HANDLE.getVolatile(this);
-- }
--
-- private void setOwnerPlain(final LockWaiter value) {
-- OWNER_HANDLE.set(this, value);
-- }
--
-- private void setOwnerRelease(final LockWaiter value) {
-- OWNER_HANDLE.setRelease(this, value);
-- }
--
--
--
-- private void setTailOpaque(final LockWaiter newTail) {
-- TAIL_HANDLE.setOpaque(this, newTail);
-- }
--
-- private void setTailRelease(final LockWaiter newTail) {
-- TAIL_HANDLE.setRelease(this, newTail);
-- }
--
-- private LockWaiter getTailOpaque() {
-- return (LockWaiter)TAIL_HANDLE.getOpaque(this);
-- }
--
--
-- private void appendWaiter(final LockWaiter waiter) {
-- // Similar to MultiThreadedQueue#appendList
-- int failures = 0;
--
-- for (LockWaiter currTail = this.getTailOpaque(), curr = currTail;;) {
-- /* It has been experimentally shown that placing the read before the backoff results in significantly greater performance */
-- /* It is likely due to a cache miss caused by another write to the next field */
-- final LockWaiter next = curr.getNextVolatile();
--
-- for (int i = 0; i < failures; ++i) {
-- Thread.onSpinWait();
-- }
--
-- if (next == null) {
-- final LockWaiter compared = curr.compareAndExchangeNextVolatile(null, waiter);
--
-- if (compared == null) {
-- /* Added */
-- /* Avoid CASing on tail more than we need to */
-- /* CAS to avoid setting an out-of-date tail */
-- if (this.getTailOpaque() == currTail) {
-- this.setTailOpaque(waiter);
-- }
-- return;
-- }
--
-- ++failures;
-- curr = compared;
-- continue;
-- }
--
-- if (curr == currTail) {
-- /* Tail is likely not up-to-date */
-- curr = next;
-- } else {
-- /* Try to update to tail */
-- if (currTail == (currTail = this.getTailOpaque())) {
-- curr = next;
-- } else {
-- curr = currTail;
-- }
-- }
-- }
-- }
--
-- // required that expected is already appended to the wait chain
-- private boolean tryAcquireBiased(final LockWaiter expected) {
-- final LockWaiter owner = this.getOwnerVolatile();
-- if (owner.getNextVolatile() == expected && owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-- this.setOwnerRelease(expected);
-- return true;
-- }
-- return false;
-- }
--
-- @Override
-- public void lock() {
-- final Thread currThread = Thread.currentThread();
-- final LockWaiter owner = this.getOwnerVolatile();
--
-- // try to fast acquire
--
-- final LockWaiter acquireObj;
-- boolean needAppend = true;
--
-- if (owner.getNextVolatile() != null) {
-- // unlikely we are able to fast acquire
-- acquireObj = new LockWaiter(currThread, 1, null);
-- } else {
-- // may be able to fast acquire the lock
-- if (owner.owner == currThread) {
-- final int oldState = owner.incrementState();
-- if (oldState == LockWaiter.STATE_BIASED) {
-- // in this case, we may not have the lock.
-- final LockWaiter next = owner.getNextVolatile();
-- if (next == null) {
-- // we win the lock
-- return;
-- } else {
-- // we have incremented the state, which means any tryAcquireBiased() will fail.
-- // The next waiter may be waiting for us, so we need to re-set our state and then
-- // try to push the lock to them.
-- // We cannot simply claim ownership of the lock, since we don't know if the next waiter saw
-- // the biased state
-- owner.setStateRelease(LockWaiter.STATE_BIASED);
-- LockSupport.unpark(next.owner);
--
-- acquireObj = new LockWaiter(currThread, 1, null);
-- // fall through to slower lock logic
-- }
-- } else {
-- // we already have the lock
-- return;
-- }
-- } else {
-- acquireObj = new LockWaiter(currThread, 1, null);
-- if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-- // we may be able to quickly acquire the lock
-- if (owner.getNextVolatile() == null && null == owner.compareAndExchangeNextVolatile(null, acquireObj)) {
-- if (owner.getStateVolatile() == LockWaiter.STATE_BIASED) {
-- this.setOwnerRelease(acquireObj);
-- return;
-- } else {
-- needAppend = false;
-- // we failed to acquire, but we can block instead - we did CAS to the next immediate owner
-- }
-- }
-- } // else: fall through to append and wait code
-- }
-- }
--
-- if (needAppend) {
-- this.appendWaiter(acquireObj); // append to end of waiters
-- }
--
-- // failed to fast acquire, so now we may need to block
-- final int spinAttempts = 10;
-- for (int i = 0; i < spinAttempts; ++i) {
-- for (int k = 0; k <= i; ++k) {
-- Thread.onSpinWait();
-- }
-- if (this.tryAcquireBiased(acquireObj)) {
-- // acquired
-- return;
-- }
-- }
--
-- // slow acquire
-- while (!this.tryAcquireBiased(acquireObj)) {
-- LockSupport.park(this);
-- }
-- }
--
-- /**
-- * {@inheritDoc}
-- * @throws IllegalMonitorStateException If the current thread does not own the lock.
-- */
-- @Override
-- public void unlock() {
-- final LockWaiter owner = this.getOwnerVolatile();
--
-- final int oldState;
-- if (owner.owner != Thread.currentThread() || (oldState = owner.getStatePlain()) <= 0) {
-- throw new IllegalMonitorStateException();
-- }
--
-- owner.setStateRelease(oldState - 1);
--
-- if (oldState != 1) {
-- return;
-- }
--
-- final LockWaiter next = owner.getNextVolatile();
--
-- if (next == null) {
-- // we can leave the lock in biased state, which will save a CAS
-- return;
-- }
--
-- // we have TWO cases:
-- // waiter saw the lock in biased state
-- // waiter did not see the lock in biased state
-- // the problem is that if the waiter saw the lock in the biased state, then it now owns the lock. but if it did not,
-- // then we still own the lock.
--
-- // However, by unparking always, the waiter will try to acquire the biased lock from us.
-- LockSupport.unpark(next.owner);
-- }
--
-- @Override
-- public void lockInterruptibly() throws InterruptedException {
-- throw new UnsupportedOperationException();
-- }
--
-- @Override
-- public boolean tryLock() {
-- throw new UnsupportedOperationException();
-- }
--
-- @Override
-- public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
-- throw new UnsupportedOperationException();
-- }
--
-- @Override
-- public Condition newCondition() {
-- throw new UnsupportedOperationException();
-- }
--
-- static final class LockWaiter {
--
-- static final int STATE_BIASED = 0;
--
-- private volatile LockWaiter next;
-- private volatile int state;
-- private Thread owner;
--
-- private static final VarHandle NEXT_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "next", LockWaiter.class);
-- private static final VarHandle STATE_HANDLE = ConcurrentUtil.getVarHandle(LockWaiter.class, "state", int.class);
--
--
-- private LockWaiter compareAndExchangeNextVolatile(final LockWaiter expect, final LockWaiter update) {
-- return (LockWaiter)NEXT_HANDLE.compareAndExchange((LockWaiter)this, expect, update);
-- }
--
-- private void setNextPlain(final LockWaiter next) {
-- NEXT_HANDLE.set((LockWaiter)this, next);
-- }
--
-- private LockWaiter getNextOpaque() {
-- return (LockWaiter)NEXT_HANDLE.getOpaque((LockWaiter)this);
-- }
--
-- private LockWaiter getNextVolatile() {
-- return (LockWaiter)NEXT_HANDLE.getVolatile((LockWaiter)this);
-- }
--
--
--
-- private int getStatePlain() {
-- return (int)STATE_HANDLE.get((LockWaiter)this);
-- }
--
-- private int getStateVolatile() {
-- return (int)STATE_HANDLE.getVolatile((LockWaiter)this);
-- }
--
-- private void setStatePlain(final int value) {
-- STATE_HANDLE.set((LockWaiter)this, value);
-- }
--
-- private void setStateRelease(final int value) {
-- STATE_HANDLE.setRelease((LockWaiter)this, value);
-- }
--
-- public LockWaiter(final Thread owner, final int initialState, final LockWaiter next) {
-- this.owner = owner;
-- this.setStatePlain(initialState);
-- this.setNextPlain(next);
-- }
--
-- public int incrementState() {
-- final int old = this.getStatePlain();
-- // Technically, we DO NOT need release for old != BIASED. But we care about optimising only for x86,
-- // which is a simple MOV for everything but volatile.
-- this.setStateRelease(old + 1);
-- return old;
-- }
-- }
--}
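The unlock() argument above leans entirely on the park/unpark contract: an unpark() delivered before its target parks is banked as a permit, so a waiter that re-checks its acquire condition in a loop can never miss a wakeup, and the releasing thread may "unpark always" without resolving which of the two handoff cases applied. A reduced, self-contained sketch of that discipline for a single waiter (independent of RBLock; all names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.LockSupport;

    final class Handoff {
        private final AtomicBoolean available = new AtomicBoolean(false);
        private volatile Thread waiter;

        void await() {
            this.waiter = Thread.currentThread();
            // Re-check on every wakeup: park() may return spuriously, and an
            // unpark() issued before we park makes park() return immediately.
            while (!this.available.compareAndSet(true, false)) {
                LockSupport.park(this);
            }
        }

        void release() {
            this.available.set(true);
            final Thread t = this.waiter;
            if (t != null) {
                LockSupport.unpark(t); // safe even if t has not parked yet
            }
        }
    }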
-diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-index d2386ee333927aedd9235212780fee04630a8510..bb5e5b9d48cb6d459119f66955017cced5af501c 100644
---- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
-@@ -93,7 +93,9 @@ public final class ChunkHolderManager {
- * Typically for region state we do not need to worry about threading concerns because it is only
- * accessed by the current region when ticking. But since this contains state (
- * tickets, and removeTickToChunkExpireTicketCount) that can be written to by any thread holding the
-- * ticket lock, the merge logic is complicated as merging only holds the region lock.
-+ * ticket lock, the merge logic is complicated as merging only holds the region lock. So, Folia has modified
-+ * the add and remove ticket functions to acquire the region lock if the current region does not own the target
-+ * position.
- */
- private final ArrayDeque<NewChunkHolder> pendingFullLoadUpdate = new ArrayDeque<>();
- private final ObjectRBTreeSet<NewChunkHolder> autoSaveQueue = new ObjectRBTreeSet<>((final NewChunkHolder c1, final NewChunkHolder c2) -> {
-@@ -573,6 +575,13 @@ public final class ChunkHolderManager {
- return false;
- }
-
-+ // Folia start - region threading
-+ final ThreadedRegioniser.ThreadedRegion currRegion = TickRegionScheduler.getCurrentRegion();
-+ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> currRegion = TickRegionScheduler.getCurrentRegion();
-+ CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk)
-+ ) != currRegion;
-+ // Folia end - region threading
-+
- this.ticketLock.lock();
- try {
- // Folia start - region threading
-@@ -585,7 +594,13 @@ public final class ChunkHolderManager {
- this.specialCaseUnload.add(holder);
- }
-
-- final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
-+ if (lock) {
-+ // we just need to prevent merging, so we only need the read lock
-+ // additionally, this will prevent deadlock in the remove all tickets function by using the read lock
-+ this.world.regioniser.acquireReadLock();
-+ }
-+ try {
-+ final ChunkHolderManager.HolderManagerRegionData targetData = lock ? this.getDataFor(chunk) : currRegion.getData().getHolderManagerRegionData();
- // Folia end - region threading
- final long removeTick = removeDelay == 0 ? NO_TIMEOUT_MARKER : targetData.currentTick + removeDelay; // Folia - region threading
- final Ticket<T> ticket = new Ticket<>(type, level, identifier, removeTick);
-@@ -631,6 +646,11 @@ public final class ChunkHolderManager {
- }
-
- return current == ticket;
-+ } finally { // Folia start - region threading
-+ if (lock) {
-+ this.world.regioniser.releaseReadLock();
-+ }
-+ } // Folia end - region threading
- } finally {
- this.ticketLock.unlock();
- }
-@@ -649,10 +669,24 @@ public final class ChunkHolderManager {
- return false;
- }
-
-+ // Folia start - region threading
-+ final ThreadedRegioniser.ThreadedRegion currRegion = TickRegionScheduler.getCurrentRegion();
-+ final ThreadedRegioniser.ThreadedRegion<TickRegions.TickRegionData, TickRegions.TickRegionSectionData> currRegion = TickRegionScheduler.getCurrentRegion();
-+ CoordinateUtils.getChunkX(chunk), CoordinateUtils.getChunkZ(chunk)
-+ ) != currRegion;
-+ // Folia end - region threading
-+
- this.ticketLock.lock();
- try {
- // Folia start - region threading
-- final ChunkHolderManager.HolderManagerRegionData targetData = this.getDataFor(chunk);
-+ if (lock) {
-+ // we just need to prevent merging, so we only need the read lock
-+ // additionally, this will prevent deadlock in the remove all tickets function by using the read lock
-+ this.world.regioniser.acquireReadLock();
-+ }
-+ try {
-+ final ChunkHolderManager.HolderManagerRegionData targetData = lock ? this.getDataFor(chunk) : currRegion.getData().getHolderManagerRegionData();
-+ // Folia end - region threading
-
- final SortedArraySet<Ticket<?>> ticketsAtChunk = targetData == null ? null : targetData.tickets.get(chunk);
- // Folia end - region threading
-@@ -667,11 +701,28 @@ public final class ChunkHolderManager {
- return false;
- }
-
-+ int newLevel = getTicketLevelAt(ticketsAtChunk); // Folia - region threading - moved up from below
-+ // Folia start - region threading
-+ // we should not change the ticket levels while the target region may be ticking
-+ if (newLevel > level) {
-+ final long unknownRemoveTick = targetData.currentTick + Math.max(0, TicketType.UNKNOWN.timeout);
-+ final Ticket<ChunkPos> unknownTicket = new Ticket<>(TicketType.UNKNOWN, level, new ChunkPos(chunk), unknownRemoveTick);
-+ if (ticketsAtChunk.add(unknownTicket)) {
-+ targetData.removeTickToChunkExpireTicketCount.computeIfAbsent(unknownRemoveTick, (final long keyInMap) -> {
-+ return new Long2IntOpenHashMap();
-+ }).addTo(chunk, 1);
-+ } else {
-+ throw new IllegalStateException("Should have been able to add " + unknownTicket + " to " + ticketsAtChunk);
-+ }
-+ newLevel = level;
-+ }
-+ // Folia end - region threading
-+
- if (ticketsAtChunk.isEmpty()) {
- targetData.tickets.remove(chunk); // Folia - region threading
- }
-
-- final int newLevel = getTicketLevelAt(ticketsAtChunk);
-+ // Folia - region threading - move up
-
- final long removeTick = ticket.removalTick;
- if (removeTick != NO_TIMEOUT_MARKER) {
-@@ -690,14 +741,12 @@ public final class ChunkHolderManager {
- this.updateTicketLevel(chunk, newLevel);
- }
-
-- // Folia start - region threading
-- // we should not change the ticket levels while the target region may be ticking
-- if (newLevel > level) {
-- this.addTicketAtLevel(TicketType.UNKNOWN, chunk, level, new ChunkPos(chunk));
-- }
-- // Folia end - region threading
--
- return true;
-+ } finally { // Folia start - region threading
-+ if (lock) {
-+ this.world.regioniser.releaseReadLock();
-+ }
-+ } // Folia end - region threading
- } finally {
- this.ticketLock.unlock();
- }
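Both hunks above implement the policy promised by the header comment: decide up front whether the caller's current region already owns the target chunk and, if not, hold the regioniser's read lock across the ticket mutation so a concurrent merge cannot move the region data out from under it. A self-contained distillation of that pattern, in which every type is a minimal stand-in rather than Folia's real class:

    import java.util.concurrent.locks.ReentrantLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class RegionLockingSketch {
        interface Region {}
        interface Regioniser {
            Region getRegionAtUnsynchronised(int chunkX, int chunkZ);
            ReentrantReadWriteLock regionLock();
        }

        private final ReentrantLock ticketLock = new ReentrantLock();
        private final Regioniser regioniser;
        private final ThreadLocal<Region> currentRegion = new ThreadLocal<>();

        RegionLockingSketch(final Regioniser regioniser) {
            this.regioniser = regioniser;
        }

        boolean mutateTickets(final int chunkX, final int chunkZ) {
            final Region curr = this.currentRegion.get();
            // Lock unless the current region already owns the target chunk;
            // ownership is the only case in which no concurrent merge can
            // invalidate the region's ticket data mid-mutation.
            final boolean lock = curr == null
                    || this.regioniser.getRegionAtUnsynchronised(chunkX, chunkZ) != curr;

            this.ticketLock.lock();
            try {
                if (lock) {
                    // the read lock suffices: we only need to prevent merges
                    this.regioniser.regionLock().readLock().lock();
                }
                try {
                    // ... resolve the target region's data, add or remove the ticket ...
                    return true;
                } finally {
                    if (lock) {
                        this.regioniser.regionLock().readLock().unlock();
                    }
                }
            } finally {
                this.ticketLock.unlock();
            }
        }
    }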
-diff --git a/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
-index f6e41c466ba2501f82fd7916742c5fc045ddf828..2334e62953a4d0a415c4c1fe653b6da063119868 100644
---- a/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
-+++ b/src/main/java/io/papermc/paper/threadedregions/ThreadedRegioniser.java
-@@ -130,6 +130,14 @@ public final class ThreadedRegioniser