
Commit f814e1e

Split Blazor JS Interop topic (#17059)

1 parent eb9557b commit f814e1e

22 files changed (+979 -938 lines changed)

.openpublishing.redirection.json (+5 lines)

@@ -914,6 +914,11 @@
       "source_path": "aspnetcore/razor-pages/upload-files.md",
       "redirect_url": "/aspnet/core/mvc/models/file-uploads",
       "redirect_document_id": false
+    },
+    {
+      "source_path": "aspnetcore/blazor/javascript-interop.md",
+      "redirect_url": "/aspnet/core/blazor/call-javascript-from-dotnet",
+      "redirect_document_id": false
     }
   ]
 }

aspnetcore/blazor/advanced-scenarios.md (+278 lines)

@@ -209,3 +209,281 @@ This is a trivial example. In more realistic cases with complex and deeply neste

* Don't write long blocks of manually-implemented `RenderTreeBuilder` logic. Prefer *.razor* files and allow the compiler to deal with the sequence numbers. If you're unable to avoid manual `RenderTreeBuilder` logic, split long blocks of code into smaller pieces wrapped in `OpenRegion`/`CloseRegion` calls. Each region has its own separate space of sequence numbers, so you can restart from zero (or any other arbitrary number) inside each region.
* If sequence numbers are hardcoded, the diff algorithm only requires that sequence numbers increase in value. The initial value and gaps are irrelevant. One legitimate option is to use the code line number as the sequence number, or start from zero and increase by ones or hundreds (or any preferred interval).
* Blazor uses sequence numbers, while other tree-diffing UI frameworks don't use them. Diffing is far faster when sequence numbers are used, and Blazor has the advantage of a compile step that deals with sequence numbers automatically for developers authoring *.razor* files.

## Perform large data transfers in Blazor Server apps

In some scenarios, large amounts of data must be transferred between JavaScript and Blazor. Typically, large data transfers occur when:

* Browser file system APIs are used to upload or download a file.
* Interop with a third-party library is required.

In Blazor Server, a limitation is in place to prevent passing single large messages that may result in performance issues.
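
The limit in question is the SignalR hub's maximum incoming message size, which defaults to 32 KB. As a sketch of where that setting lives, assuming a typical Blazor Server `Startup.ConfigureServices` method, the limit can be raised, although segmenting the data as described below is usually preferable to sending larger single messages:

```csharp
// Sketch only: raising the SignalR message size limit trades server memory
// for convenience. The segmentation approach described in this section
// avoids large single messages entirely. The 64 KB value is illustrative.
public void ConfigureServices(IServiceCollection services)
{
    services.AddServerSideBlazor()
        .AddHubOptions(options =>
        {
            // The SignalR default is 32 KB.
            options.MaximumReceiveMessageSize = 64 * 1024;
        });
}
```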

Consider the following guidance when developing code that transfers data between JavaScript and Blazor:

* Slice the data into smaller pieces, and send the data segments sequentially until all of the data is received by the server.
* Don't allocate large objects in JavaScript and C# code.
* Don't block the main UI thread for long periods when sending or receiving data.
* Free any memory consumed when the process is completed or cancelled.
* Enforce the following additional requirements for security purposes (a sketch follows this list):
  * Declare the maximum file or data size that can be passed.
  * Declare the minimum upload rate from the client to the server.
* After the data is received by the server, the data can be:
  * Temporarily stored in a memory buffer until all of the segments are collected.
  * Consumed immediately. For example, the data can be stored immediately in a database or written to disk as each segment is received.
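
The maximum size requirement is enforced by the `maxSize` check in the `ReceiveFile` method shown later in this section, but the minimum upload rate isn't enforced by the sample. A minimal, hypothetical sketch of such a check, which times segments as they arrive (the `UploadRateGuard` name and threshold are illustrative, not part of the sample):

```csharp
using System.Diagnostics;

// Hypothetical helper (not part of the sample): tracks how quickly segments
// arrive and signals when the client falls below a minimum upload rate.
public class UploadRateGuard
{
    private readonly Stopwatch _stopwatch = Stopwatch.StartNew();
    private readonly double _minBytesPerSecond;
    private long _receivedBytes;

    public UploadRateGuard(double minBytesPerSecond)
    {
        _minBytesPerSecond = minBytesPerSecond;
    }

    // Call after each segment is received. Returns false when the average
    // upload rate drops below the configured minimum, so the caller can
    // abort the transfer and free any buffered segments.
    public bool RegisterSegment(int segmentBytes)
    {
        _receivedBytes += segmentBytes;
        var elapsedSeconds = _stopwatch.Elapsed.TotalSeconds;

        // Don't judge the rate until at least one second has elapsed.
        return elapsedSeconds < 1 ||
            _receivedBytes / elapsedSeconds >= _minBytesPerSecond;
    }
}
```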

The following file uploader class handles JS interop with the client. The uploader class uses JS interop to:

* Poll the client to send a data segment.
* Abort the transaction if polling times out.

```csharp
using System;
using System.Buffers;
using System.Collections.Generic;
using System.IO;
using System.Threading.Tasks;
using Microsoft.JSInterop;

public class FileUploader : IDisposable
{
    private readonly IJSRuntime _jsRuntime;
    private readonly int _segmentSize = 6144;
    private readonly int _maxBase64SegmentSize = 8192;
    private readonly DotNetObjectReference<FileUploader> _thisReference;
    private List<IMemoryOwner<byte>> _uploadedSegments = 
        new List<IMemoryOwner<byte>>();

    public FileUploader(IJSRuntime jsRuntime)
    {
        _jsRuntime = jsRuntime;
    }

    public async Task<Stream> ReceiveFile(string selector, int maxSize)
    {
        var fileSize = 
            await _jsRuntime.InvokeAsync<int>("getFileSize", selector);

        if (fileSize > maxSize)
        {
            return null;
        }

        var numberOfSegments = Math.Floor(fileSize / (double)_segmentSize) + 1;
        var lastSegmentBytes = 0;
        string base64EncodedSegment;

        for (var i = 0; i < numberOfSegments; i++)
        {
            try
            {
                base64EncodedSegment = 
                    await _jsRuntime.InvokeAsync<string>(
                        "receiveSegment", i, selector);

                if (base64EncodedSegment.Length < _maxBase64SegmentSize && 
                    i < numberOfSegments - 1)
                {
                    return null;
                }
            }
            catch
            {
                return null;
            }

            var current = MemoryPool<byte>.Shared.Rent(_segmentSize);

            if (!Convert.TryFromBase64String(base64EncodedSegment, 
                current.Memory.Slice(0, _segmentSize).Span, out lastSegmentBytes))
            {
                return null;
            }

            _uploadedSegments.Add(current);
        }

        var segments = _uploadedSegments;
        _uploadedSegments = null;

        return new SegmentedStream(segments, _segmentSize, lastSegmentBytes);
    }

    public void Dispose()
    {
        if (_uploadedSegments != null)
        {
            foreach (var segment in _uploadedSegments)
            {
                segment.Dispose();
            }
        }
    }
}
```

In the preceding example:

* The `_maxBase64SegmentSize` is set to `8192`, which is calculated from `_maxBase64SegmentSize = _segmentSize * 4 / 3`.
* Low-level .NET Core memory management APIs are used to store the memory segments on the server in `_uploadedSegments`.
* A `ReceiveFile` method is used to handle the upload through JS interop (a usage sketch follows this list):
  * The file size is determined in bytes through JS interop with `_jsRuntime.InvokeAsync<int>("getFileSize", selector)`.
  * The number of segments to receive is calculated and stored in `numberOfSegments`.
  * The segments are requested in a `for` loop through JS interop with `_jsRuntime.InvokeAsync<string>("receiveSegment", i, selector)`. All segments but the last must be 8,192 bytes before decoding. The client is forced to send the data in an efficient manner.
  * For each segment received, checks are performed before decoding with <xref:System.Convert.TryFromBase64String*>.
  * A stream with the data is returned as a new <xref:System.IO.Stream> (`SegmentedStream`) after the upload is complete.
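
The uploader isn't shown in use in this section. The following is a minimal sketch of how it might be consumed from a component's `@code` block, assuming `IJSRuntime` is injected as `JSRuntime`, the page contains an `<input type="file" id="fileInput" />` element, and the usual `System.IO` and `Microsoft.JSInterop` namespaces are in scope. The selector, size cap, and output path are illustrative, not part of the sample:

```csharp
private async Task UploadSelectedFileAsync()
{
    using var uploader = new FileUploader(JSRuntime);

    // Illustrative cap of roughly 50 MB. ReceiveFile returns null when the
    // file is too large or a segment fails validation, so check for that.
    var stream = await uploader.ReceiveFile("#fileInput", 50 * 1024 * 1024);

    if (stream == null)
    {
        return;
    }

    // Consume the data immediately, for example by writing it to disk.
    using var output = File.Create(Path.GetTempFileName());
    await stream.CopyToAsync(output);
}
```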

The segmented stream class exposes the list of segments as a read-only, non-seekable <xref:System.IO.Stream>:

```csharp
using System;
using System.Buffers;
using System.Collections.Generic;
using System.IO;

public class SegmentedStream : Stream
{
    private readonly ReadOnlySequence<byte> _sequence;
    private long _currentPosition = 0;

    public SegmentedStream(IList<IMemoryOwner<byte>> segments, int segmentSize, 
        int lastSegmentSize)
    {
        if (segments.Count == 1)
        {
            _sequence = new ReadOnlySequence<byte>(
                segments[0].Memory.Slice(0, lastSegmentSize));
            return;
        }

        var sequenceSegment = new BufferSegment<byte>(
            segments[0].Memory.Slice(0, segmentSize));
        var lastSegment = sequenceSegment;

        for (int i = 1; i < segments.Count; i++)
        {
            var isLastSegment = i + 1 == segments.Count;
            lastSegment = lastSegment.Append(segments[i].Memory.Slice(
                0, isLastSegment ? lastSegmentSize : segmentSize));
        }

        _sequence = new ReadOnlySequence<byte>(
            sequenceSegment, 0, lastSegment, lastSegmentSize);
    }

    public override long Position
    {
        get => throw new NotImplementedException();
        set => throw new NotImplementedException();
    }

    public override int Read(byte[] buffer, int offset, int count)
    {
        var bytesToWrite = (int)(_currentPosition + count < _sequence.Length ? 
            count : _sequence.Length - _currentPosition);
        var data = _sequence.Slice(_currentPosition, bytesToWrite);
        data.CopyTo(buffer.AsSpan(offset, bytesToWrite));
        _currentPosition += bytesToWrite;

        return bytesToWrite;
    }

    private class BufferSegment<T> : ReadOnlySequenceSegment<T>
    {
        public BufferSegment(ReadOnlyMemory<T> memory)
        {
            Memory = memory;
        }

        public BufferSegment<T> Append(ReadOnlyMemory<T> memory)
        {
            var segment = new BufferSegment<T>(memory)
            {
                RunningIndex = RunningIndex + Memory.Length
            };

            Next = segment;

            return segment;
        }
    }

    public override bool CanRead => true;

    public override bool CanSeek => false;

    public override bool CanWrite => false;

    public override long Length => throw new NotImplementedException();

    public override void Flush() => throw new NotImplementedException();

    public override long Seek(long offset, SeekOrigin origin) => 
        throw new NotImplementedException();

    public override void SetLength(long value) => 
        throw new NotImplementedException();

    public override void Write(byte[] buffer, int offset, int count) => 
        throw new NotImplementedException();
}
```
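
As a rough sanity check, not part of the original sample, the stream's behavior can be seen by wrapping two rented segments and reading the bytes back through the standard `Stream` API. The segment sizes and byte values below are arbitrary:

```csharp
// Two segments: a full 4-byte segment and a final 2-byte segment.
var segmentSize = 4;
var lastSegmentSize = 2;

var first = MemoryPool<byte>.Shared.Rent(segmentSize);
var second = MemoryPool<byte>.Shared.Rent(segmentSize);
new byte[] { 1, 2, 3, 4 }.AsSpan().CopyTo(first.Memory.Span);
new byte[] { 5, 6 }.AsSpan().CopyTo(second.Memory.Span);

var segments = new List<IMemoryOwner<byte>> { first, second };
using var stream = new SegmentedStream(segments, segmentSize, lastSegmentSize);

// Read until the stream reports no more data. The stream is non-seekable,
// so Length and Position aren't used.
var buffer = new byte[16];
var totalRead = 0;
int read;

while ((read = stream.Read(buffer, totalRead, buffer.Length - totalRead)) > 0)
{
    totalRead += read;
}

// totalRead is 6, and buffer begins with 1, 2, 3, 4, 5, 6.
```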

The following code implements JavaScript functions to receive the data:

```javascript
function getFileSize(selector) {
  const file = getFile(selector);
  return file.size;
}

async function receiveSegment(segmentNumber, selector) {
  const file = getFile(selector);
  const index = segmentNumber * 6144;
  return await getNextChunk(file, index);
}

function getFile(selector) {
  const element = document.querySelector(selector);
  if (!element) {
    throw new Error('Invalid selector');
  }
  const files = element.files;
  if (!files || files.length === 0) {
    throw new Error(`Element ${selector} doesn't contain any files.`);
  }
  const file = files[0];
  return file;
}

function getFileSegments(file) {
  const size = file.size;
  const segments = Math.floor(size % 6144 === 0 ? size / 6144 : 1 + size / 6144);
  return segments;
}

async function getNextChunk(file, index) {
  // Segments are 6144 bytes, which keeps each Base64-encoded payload at or
  // under 8192 characters.
  const length = file.size - index <= 6144 ? file.size - index : 6144;
  const chunk = file.slice(index, index + length);
  // Return only the Base64 string so the segment deserializes as a string
  // on the .NET side.
  return await base64EncodeAsync(chunk);
}

async function base64EncodeAsync(chunk) {
  const reader = new FileReader();
  const result = new Promise((resolve, reject) => {
    reader.addEventListener('load',
      () => {
        const base64Chunk = reader.result;
        const cleanChunk =
          base64Chunk.replace('data:application/octet-stream;base64,', '');
        resolve(cleanChunk);
      },
      false);
    reader.addEventListener('error', reject);
  });
  reader.readAsDataURL(chunk);
  return result;
}
```
