From a8c7a6c20195b9a1f3c31d8aca32c152f385bcc7 Mon Sep 17 00:00:00 2001
From: Alan McGovern
Date: Tue, 31 Jan 2023 23:33:17 +0000
Subject: [PATCH] [core] Tweak rate limiting to work better at low rates (#618)

* [core] Tweak rate limiting to work better at low rates

Fix another issue with a rate limit of 1 byte/second. Hopefully no one
was actually trying to use a limit this low.

Additionally, simplify the logic. Rate limits are much easier to apply
than they used to be because the engine now works in bytes rather than
'blocks' or 'chunks'. Back in the day we had to estimate how much data
would be received for a given 'ReceiveAsync' call; now we just count
the raw number of bytes sent/received, or read/written, so we don't
need complicated math to account for over/under-shoot.
---
 .../IRateLimiter.cs                           |  7 --
 .../RateLimiter.cs                            | 32 ++-------
 .../RateLimiterGroup.cs                       | 10 ---
 .../MonoTorrent.Client/ClientEngine.cs        | 28 +-------
 .../Managers/DiskManager.cs                   |  4 +-
 .../Managers/TorrentManager.cs                |  6 +-
 .../MonoTorrent.Client/NetworkIO.cs           | 12 ++--
 .../SimpleTorrentManager.cs                   |  1 +
 .../TrackerServer.cs                          |  3 +-
 .../RateLimiterTests.cs                       |  2 +-
 .../MonoTorrent.Client/DiskManagerTests.cs    | 68 +++++++++++--------
 .../MonoTorrent.Client/NetworkIOTests.cs      | 33 ---------
 12 files changed, 59 insertions(+), 147 deletions(-)

diff --git a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/IRateLimiter.cs b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/IRateLimiter.cs
index 55cc240cc..45740f330 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/IRateLimiter.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/IRateLimiter.cs
@@ -31,13 +31,6 @@ namespace MonoTorrent.Client.RateLimiters
 {
     interface IRateLimiter
     {
-        ///
-        /// When this returns there is no preference on
-        /// how large each chunk of work should be. Otherwise, work should be processed
-        /// in chunks of this size.
-        ///
-        int? PreferredChunkSize { get; }
-
         ///
         /// Returns true if there is sufficient capacity left in the rate limiter to
         /// process the specified amount of data. Also returns true if
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiter.cs b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiter.cs
index 284e6b85d..71fb70cc4 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiter.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiter.cs
@@ -27,48 +27,26 @@ //
+using System;
 using System.Threading;
 
 namespace MonoTorrent.Client.RateLimiters
 {
     sealed class RateLimiter : IRateLimiter
     {
-        long savedError;
         long chunks;
 
-        public int? PreferredChunkSize { get; private set; }
-
         public bool Unlimited { get; set; }
 
         public RateLimiter ()
         {
-            UpdateChunks (0, 0, null);
+            UpdateChunks (0);
         }
 
-        public void UpdateChunks (long maxRate, long actualRate, int? preferredChunkSize)
+        public void UpdateChunks (long maxRate)
        {
-            PreferredChunkSize = preferredChunkSize;
             Unlimited = maxRate == 0;
-            if (Unlimited)
-                return;
-
-            // From experimentation, i found that increasing by 5% gives more accuate rate limiting
-            // for peer communications. For disk access and whatnot, a 5% overshoot is fine.
-            maxRate = (long) (maxRate * 1.05);
-            long errorRateDown = maxRate - actualRate;
-            long delta = (long) (0.4 * errorRateDown + 0.6 * savedError);
-            savedError = errorRateDown;
-
-            long increaseAmount = maxRate + delta;
-            Interlocked.Add (ref chunks, increaseAmount);
-            if (chunks > (maxRate * 1.2))
-                Interlocked.Exchange (ref chunks, (int) (maxRate * 1.2));
-
-            if (chunks < (maxRate / 2))
-                Interlocked.Exchange (ref chunks, (maxRate / 2));
-
-            if (maxRate == 0)
-                chunks = 0;
+            Interlocked.Exchange(ref chunks, maxRate);
         }
 
         public bool TryProcess (long amount)
@@ -79,7 +57,7 @@ public bool TryProcess (long amount)
             long c;
             do {
                 c = Interlocked.Read (ref chunks);
-                if (c < 0)
+                if (c <= 0)
                     return false;
             } while (Interlocked.CompareExchange (ref chunks, c - amount, c) != c);
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterGroup.cs b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterGroup.cs
index 14b3fd9ec..f6b4b0fbd 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterGroup.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterGroup.cs
@@ -37,16 +37,6 @@ sealed class RateLimiterGroup : IRateLimiter, IEnumerable
     {
         readonly List limiters;
 
-        public int? PreferredChunkSize {
-            get {
-                int? preferredChunkSize = null;
-                for (int i = 0; i < limiters.Count; i++)
-                    if (limiters[i].PreferredChunkSize.HasValue)
-                        preferredChunkSize = preferredChunkSize.HasValue ? Math.Min (limiters[i].PreferredChunkSize!.Value, preferredChunkSize.Value) : limiters[i].PreferredChunkSize!.Value;
-                return preferredChunkSize;
-            }
-        }
-
         public bool Unlimited {
             get {
                 for (int i = 0; i < limiters.Count; i++)
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client/ClientEngine.cs b/src/MonoTorrent.Client/MonoTorrent.Client/ClientEngine.cs
index 6763b96e8..4d4cf2eff 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client/ClientEngine.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client/ClientEngine.cs
@@ -773,37 +773,13 @@ public async Task StopAllAsync (TimeSpan timeout)
 
         #region Private/Internal methods
 
-
-        internal static int? PreferredChunkSize (int maxSpeedEngine, int maxSpeedTorrent)
-        {
-            // Unlimited
-            if (maxSpeedEngine == 0 && maxSpeedTorrent == 0)
-                return null;
-
-            int maxSpeed;
-            if (maxSpeedEngine != 0 && maxSpeedTorrent != 0)
-                maxSpeed = Math.Min (maxSpeedEngine, maxSpeedTorrent);
-            else
-                maxSpeed = Math.Max (maxSpeedEngine, maxSpeedTorrent);
-
-            // The max we transmit for a single socket call is 16kB as that is the size of a
-            // single block. If the transfer rate is unlimited, or we can transfer greater
-            // than 256kB/sec then continue using 'unlimited' sized chunks. Otherwise restrict
-            // individual calls to 4kB to try and keep things reasonably evenly distributed.
-            if (maxSpeed == 0 || maxSpeed > 16 * 16 * 1024)
-                return null;
-
-            // If the limit is below 256 kB/sec then we can communicate in 4kB chunks
-            return 4096 + 32;
-        }
-
         void LogicTick ()
         {
             tickCount++;
 
             if (tickCount % 2 == 0) {
-                downloadLimiter.UpdateChunks (Settings.MaximumDownloadRate, TotalDownloadRate, PreferredChunkSize (Settings.MaximumDownloadRate, 0));
-                uploadLimiter.UpdateChunks (Settings.MaximumUploadRate, TotalUploadRate, PreferredChunkSize (Settings.MaximumUploadRate, 0));
+                downloadLimiter.UpdateChunks (Settings.MaximumDownloadRate);
+                uploadLimiter.UpdateChunks (Settings.MaximumUploadRate);
             }
 
             ConnectionManager.CancelPendingConnects ();
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client/Managers/DiskManager.cs b/src/MonoTorrent.Client/MonoTorrent.Client/Managers/DiskManager.cs
index dc08c4c9e..f363eb0b7 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client/Managers/DiskManager.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client/Managers/DiskManager.cs
@@ -661,8 +661,8 @@ ReusableTask Tick (int delta, bool waitForBufferedIO)
             WriterReadMonitor.Tick (delta);
             WriterWriteMonitor.Tick (delta);
 
-            WriteLimiter.UpdateChunks (Settings.MaximumDiskWriteRate, WriteRate, null);
-            ReadLimiter.UpdateChunks (Settings.MaximumDiskReadRate, ReadRate, null);
+            WriteLimiter.UpdateChunks (Settings.MaximumDiskWriteRate);
+            ReadLimiter.UpdateChunks (Settings.MaximumDiskReadRate);
 
             ReusableTask processTask = ProcessBufferedIOAsync ();
             return waitForBufferedIO ? processTask : ReusableTask.CompletedTask;
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client/Managers/TorrentManager.cs b/src/MonoTorrent.Client/MonoTorrent.Client/Managers/TorrentManager.cs
index 09f321647..1caa90dbe 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client/Managers/TorrentManager.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client/Managers/TorrentManager.cs
@@ -602,7 +602,7 @@ public async Task MoveFileAsync (ITorrentManagerFile file, string path)
                 throw new TorrentException ("Cannot move files when the torrent is active");
 
             try {
-                var paths = TorrentFileInfo.GetNewPaths (Path.GetFullPath (path), Engine.Settings.UsePartialFiles, file.Path == file.DownloadCompleteFullPath);
+                var paths = TorrentFileInfo.GetNewPaths (Path.GetFullPath (path), Engine!.Settings.UsePartialFiles, file.Path == file.DownloadCompleteFullPath);
                 await Engine!.DiskManager.MoveFileAsync (file, paths);
             } catch (Exception ex) {
                 TrySetError (Reason.WriteFailure, ex);
@@ -1034,8 +1034,8 @@ internal void RaiseConnectionAttemptFailed (ConnectionAttemptFailedEventArgs arg
         internal void UpdateLimiters ()
         {
             if (Engine != null) {
-                DownloadLimiter.UpdateChunks (Settings.MaximumDownloadRate, Monitor.DownloadRate, ClientEngine.PreferredChunkSize (Engine.Settings.MaximumDownloadRate, Settings.MaximumDownloadRate));
-                UploadLimiter.UpdateChunks (Settings.MaximumUploadRate, Monitor.UploadRate, ClientEngine.PreferredChunkSize (Engine.Settings.MaximumUploadRate, Settings.MaximumUploadRate));
+                DownloadLimiter.UpdateChunks (Settings.MaximumDownloadRate);
+                UploadLimiter.UpdateChunks (Settings.MaximumUploadRate);
             }
         }
         #endregion Internal Methods
diff --git a/src/MonoTorrent.Client/MonoTorrent.Client/NetworkIO.cs b/src/MonoTorrent.Client/MonoTorrent.Client/NetworkIO.cs
index 5422039d4..c5607961f 100644
--- a/src/MonoTorrent.Client/MonoTorrent.Client/NetworkIO.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.Client/NetworkIO.cs
@@ -129,15 +129,14 @@ public static async ReusableTask ReceiveAsync (IPeerConnection connection, Memor
             while (buffer.Length > 0) {
                 int transferred;
                 bool unlimited = rateLimiter?.Unlimited ?? true;
-                int shouldRead = unlimited || !rateLimiter!.PreferredChunkSize.HasValue ? buffer.Length : Math.Min (rateLimiter.PreferredChunkSize.Value, buffer.Length);
-                if (rateLimiter != null && !unlimited && !rateLimiter.TryProcess (shouldRead)) {
+                if (rateLimiter != null && !unlimited && !rateLimiter.TryProcess (buffer.Length)) {
                     var tcs = new ReusableTaskCompletionSource ();
                     lock (receiveQueue)
                         receiveQueue.Enqueue (new QueuedIO (connection, buffer, rateLimiter, tcs));
                     transferred = await tcs.Task.ConfigureAwait (false);
                 } else {
-                    transferred = await connection.ReceiveAsync (buffer.Slice (0, shouldRead)).ConfigureAwait (false);
+                    transferred = await connection.ReceiveAsync (buffer).ConfigureAwait (false);
                 }
 
                 if (transferred == 0)
@@ -162,15 +161,14 @@ public static async ReusableTask SendAsync (IPeerConnection connection, Memory
             while (buffer.Length > 0) {
                 int transferred;
                 bool unlimited = rateLimiter?.Unlimited ?? true;
-                int shouldRead = unlimited || !rateLimiter!.PreferredChunkSize.HasValue ? buffer.Length : Math.Min (rateLimiter.PreferredChunkSize.Value, buffer.Length);
-                if (rateLimiter != null && !unlimited && !rateLimiter.TryProcess (shouldRead)) {
+                if (rateLimiter != null && !unlimited && !rateLimiter.TryProcess (buffer.Length)) {
                     var tcs = new ReusableTaskCompletionSource ();
                     lock (sendQueue)
-                        sendQueue.Enqueue (new QueuedIO (connection, buffer.Slice (0, shouldRead), rateLimiter, tcs));
+                        sendQueue.Enqueue (new QueuedIO (connection, buffer, rateLimiter, tcs));
                     transferred = await tcs.Task.ConfigureAwait (false);
                 } else {
-                    transferred = await connection.SendAsync (buffer.Slice (0, shouldRead)).ConfigureAwait (false);
+                    transferred = await connection.SendAsync (buffer).ConfigureAwait (false);
                 }
 
                 if (transferred == 0)
diff --git a/src/MonoTorrent.Client/MonoTorrent.TrackerServer/SimpleTorrentManager.cs b/src/MonoTorrent.Client/MonoTorrent.TrackerServer/SimpleTorrentManager.cs
index 39ff5c43e..6a0fe10cd 100644
--- a/src/MonoTorrent.Client/MonoTorrent.TrackerServer/SimpleTorrentManager.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.TrackerServer/SimpleTorrentManager.cs
@@ -146,6 +146,7 @@ public List GetPeers (AddressFamily addressFamily)
         /// The bencoded dictionary to add the peers to
         /// The number of peers to add
         /// True if the peers should be in compact form
+        ///
         internal void GetPeers (BEncodedDictionary response, int count, bool compact, AddressFamily addressFamily)
         {
             byte[]? compactResponse = null;
diff --git a/src/MonoTorrent.Client/MonoTorrent.TrackerServer/TrackerServer.cs b/src/MonoTorrent.Client/MonoTorrent.TrackerServer/TrackerServer.cs
index 537076a27..13085e44f 100644
--- a/src/MonoTorrent.Client/MonoTorrent.TrackerServer/TrackerServer.cs
+++ b/src/MonoTorrent.Client/MonoTorrent.TrackerServer/TrackerServer.cs
@@ -191,8 +191,7 @@ public bool Add (ITrackable trackable, IPeerComparer comparer)
         ///
         /// Adds the trackable to the server
         ///
-        /// The trackable to add
-        /// The comparer used to decide whether two peers are the same.
+        /// .
         ///
         internal bool Add (SimpleTorrentManager manager)
         {
diff --git a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterTests.cs b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterTests.cs
index 6209ff09b..31f24c8a6 100644
--- a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterTests.cs
+++ b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client.RateLimiters/RateLimiterTests.cs
@@ -51,7 +51,7 @@ public class RateLimiterTests
         public void ChunkSizeLargerThanRateLimit ()
         {
             var rateLimiter = new RateLimiter ();
-            rateLimiter.UpdateChunks (10, 10, 10);
+            rateLimiter.UpdateChunks (10);
 
             // We can process any size chunk as long as there's some rate limit left
             Assert.IsTrue (rateLimiter.TryProcess (11));
diff --git a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/DiskManagerTests.cs b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/DiskManagerTests.cs
index 261b433c1..4bc6eaa84 100644
--- a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/DiskManagerTests.cs
+++ b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/DiskManagerTests.cs
@@ -220,21 +220,29 @@ public async Task ExceedReadRate ()
             await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskReadRate = 1 }.ToSettings ());
             await diskManager.Tick (1000).WithTimeout ();
 
-            // Queue up 6 reads, none should process.
+            // Queue up 7 reads, 1 should process.
             var buffer = new byte[Constants.BlockSize];
-            int count = 6;
             var tasks = new List ();
-            for (int i = 0; i < count; i++)
+            for (int i = 0; i < 7 + 1; i++)
                 tasks.Add (diskManager.ReadAsync (fileData, new BlockInfo (0, 0, buffer.Length), buffer).AsTask ());
 
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingReadBytes, "#1");
+            // Wait for the first task to complete.
+            var doneTask = await Task.WhenAny (tasks).WithTimeout ();
+            tasks.Remove (doneTask);
+            await doneTask;
 
-            // We should still process none.
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingReadBytes, "#1");
+
+            // This should process one too.
             await diskManager.Tick (1000).WithTimeout ();
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingReadBytes, "#2");
+            doneTask = await Task.WhenAny (tasks).WithTimeout ();
+            tasks.Remove (doneTask);
+            await doneTask;
 
-            // Give a proper max read rate.
-            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskReadRate = Constants.BlockSize * 2 }.ToSettings ());
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingReadBytes, "#2");
+
+            // Give a max read rate which allows at least 2 blocks to read.
+            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskReadRate = (int)(Constants.BlockSize * 1.8) }.ToSettings ());
 
             for (int i = 0; i < 2; i++) {
                 await diskManager.Tick (1000).WithTimeout ();
@@ -245,17 +253,15 @@ public async Task ExceedReadRate ()
                 }
 
                 Assert.IsFalse (tasks.Any (t => t.IsCompleted));
-                count -= 2;
-                Assert.AreEqual (buffer.Length * count, diskManager.PendingReadBytes, "#3." + i);
+                Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingReadBytes, "#3." + i);
             }
 
             // If we add more reads after we used up our allowance they still won't process.
             for (int i = 0; i < 2; i++) {
-                count++;
                 tasks.Add (diskManager.ReadAsync (fileData, new BlockInfo (0, 0, buffer.Length), buffer).AsTask ());
             }
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingReadBytes, "#4." + count);
-            while (count > 0) {
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingReadBytes, "#4");
+            while (tasks.Count > 0) {
                 await diskManager.Tick (1000).WithTimeout ();
 
                 for (int t = 0; t < 2; t++) {
@@ -265,8 +271,7 @@ public async Task ExceedReadRate ()
                 }
 
                 Assert.IsFalse (tasks.Any (t => t.IsCompleted));
-                count -= 2;
-                Assert.AreEqual (buffer.Length * count, diskManager.PendingReadBytes, "#5." + count);
+                Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingReadBytes, "#5");
             }
         }
@@ -277,19 +282,26 @@ public async Task ExceedWriteRate ()
             await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskWriteRate = 1, DiskCacheBytes = 0 }.ToSettings ());
             await diskManager.Tick (1000);
 
-            // Queue up 6 reads, none should process.
+            // Queue up 8 writes, 1 should process.
             var buffer = new byte[Constants.BlockSize];
-            int count = 6;
             var tasks = new List ();
-            for (int i = 0; i < count; i++)
+            for (int i = 0; i < 8; i++)
                 tasks.Add (diskManager.WriteAsync (fileData, new BlockInfo (i / 3, Constants.BlockSize * (i % 3), Constants.BlockSize), buffer).AsTask ());
 
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingWriteBytes, "#1");
+            // Wait for the first task to complete.
+            var doneTask = await Task.WhenAny (tasks).WithTimeout ();
+            tasks.Remove (doneTask);
+            await doneTask;
 
-            // We should still process none.
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingWriteBytes, "#1");
+
+            // We should still process one.
             await diskManager.Tick (1000);
+            doneTask = await Task.WhenAny (tasks).WithTimeout ();
+            tasks.Remove (doneTask);
+            await doneTask;
 
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingWriteBytes, "#2");
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingWriteBytes, "#2");
 
             // Give a proper max read rate.
             await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskWriteRate = Constants.BlockSize * 2, DiskCacheBytes = 0 }.ToSettings ());
@@ -303,16 +315,14 @@ public async Task ExceedWriteRate ()
                 }
 
                 Assert.IsFalse (tasks.Any (t => t.IsCompleted));
-                count -= 2;
-                Assert.AreEqual (buffer.Length * count, diskManager.PendingWriteBytes, "#3." + i);
+                Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingWriteBytes, "#3." + i);
             }
 
             // If we add more writes after we used up our allowance they still won't process.
             for (int i = 0; i < 2; i++) {
-                count++;
                 tasks.Add (diskManager.WriteAsync (fileData, new BlockInfo (0, Constants.BlockSize * i, Constants.BlockSize), buffer).AsTask ());
             }
-            Assert.AreEqual (buffer.Length * count, diskManager.PendingWriteBytes, "#4");
+            Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingWriteBytes, "#4");
 
             while (diskManager.PendingWriteBytes > 0) {
                 await diskManager.Tick (1000);
@@ -323,8 +333,7 @@ public async Task ExceedWriteRate ()
                 }
 
                 Assert.IsFalse (tasks.Any (t => t.IsCompleted));
-                count -= 2;
-                Assert.AreEqual (buffer.Length * count, diskManager.PendingWriteBytes, "#5." + diskManager.PendingWriteBytes);
+                Assert.AreEqual (buffer.Length * tasks.Count, diskManager.PendingWriteBytes, "#5." + diskManager.PendingWriteBytes);
             }
         }
@@ -466,12 +475,13 @@ public async Task ReadPieceTwo ()
         public async Task ReadRate ()
         {
             var buffer = new byte[Constants.BlockSize];
-            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskReadRate = Constants.BlockSize, DiskCacheBytes = 0 }.ToSettings ());
+            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskReadRate = 1, DiskCacheBytes = 0 }.ToSettings ());
             await diskManager.Tick (1000);
 
             var tasks = new List ();
             for (int i = 0; i < SpeedMonitor.DefaultAveragePeriod + 1; i++)
                 tasks.Add (diskManager.ReadAsync (fileData, new BlockInfo (0, 0, Constants.BlockSize), buffer).AsTask ());
+
             while (diskManager.PendingReadBytes > 0) {
                 await diskManager.Tick (1000);
                 var done = await Task.WhenAny (tasks).WithTimeout ();
@@ -694,7 +704,7 @@ public async Task WritePiece_ReverseOrder ()
         public async Task WriteRate ()
         {
             var buffer = new byte[Constants.BlockSize];
-            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskWriteRate = Constants.BlockSize, DiskCacheBytes = 0 }.ToSettings ());
+            await diskManager.UpdateSettingsAsync (new EngineSettingsBuilder { MaximumDiskWriteRate = 1, DiskCacheBytes = 0 }.ToSettings ());
             await diskManager.Tick (1000);
 
             var tasks = new List ();
diff --git a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/NetworkIOTests.cs b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/NetworkIOTests.cs
index 552f93fe3..597298295 100644
--- a/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/NetworkIOTests.cs
+++ b/src/Tests/Tests.MonoTorrent.Client/MonoTorrent.Client/NetworkIOTests.cs
@@ -125,24 +125,6 @@ public async Task DisposeBeforeSend ()
             }
         }
 
-        [Test]
-        public async Task ReceiveData_RateLimited ()
-        {
-            // Allow 1 megabyte worth of data
-            var oneMegabyte = 1 * 1024 * 1024;
-            var limiter = new RateLimiter ();
-            limiter.UpdateChunks (oneMegabyte, oneMegabyte, NetworkIO.ChunkLength);
-
-            using var r1 = MemoryPool.Default.Rent (oneMegabyte, out Memory sendBuffer);
-            using var r2 = MemoryPool.Default.Rent (oneMegabyte, out Memory receiveBuffer);
-
-            await Outgoing.SendAsync (sendBuffer);
-            await NetworkIO.ReceiveAsync (Incoming, receiveBuffer, limiter, null, null);
-
-            var expectedChunks = (int) Math.Ceiling (oneMegabyte / (double) NetworkIO.ChunkLength);
-            Assert.AreEqual (expectedChunks, Incoming.Receives.Count, "#1");
-        }
-
         [Test]
         public async Task ReceiveData_Unlimited ()
         {
@@ -231,21 +213,6 @@ public async Task SendData ()
             await DoSend (false, false);
         }
 
-        [Test]
-        public async Task SendData_RateLimited ()
-        {
-            // Allow 1 megabyte worth of data
-            var oneMegabyte = 1 * 1024 * 1024;
-            var limiter = new RateLimiter ();
-            limiter.UpdateChunks (oneMegabyte, oneMegabyte, NetworkIO.ChunkLength);
-
-            using var releaser = MemoryPool.Default.Rent (oneMegabyte, out Memory buffer);
-            await NetworkIO.SendAsync (Incoming, buffer, limiter, null, null);
-
-            var expectedChunks = (int) Math.Ceiling (oneMegabyte / (double) NetworkIO.ChunkLength);
-            Assert.AreEqual (expectedChunks, Incoming.Sends.Count, "#1");
-        }
-
         [Test]
         public async Task SendData_Unlimited ()
         {