Switch to upload using JS
parent 9e4abc535b
commit 09211b6213
1 changed file with 13 additions and 66 deletions
@@ -151,83 +151,30 @@
     private async void FileSelected(InputFileChangeEventArgs args)
     {
         File = args.File;
-
-        //var buffer = new byte[File.Size];
-
-        //await File.OpenReadStream().ReadAsync(buffer);
     }
 
     private async Task UploadArchiveJS()
     {
-        var key = (await JS.InvokeAsync<string>("Uploader.Upload", "FileInput"));
-    }
-
-    private async Task UploadArchive()
-    {
-        long uploadedBytes = 0;
-        long totalBytes = File.Size;
-
-        Watch = new Stopwatch();
-
-        using (var stream = File.OpenReadStream(long.MaxValue))
-        {
-            Uploading = true;
-
-            Watch.Start();
-
-            while (Uploading)
-            {
-                byte[] chunk;
-
-                if (totalBytes - uploadedBytes < ChunkSize)
-                    chunk = new byte[totalBytes - uploadedBytes];
-                else
-                    chunk = new byte[ChunkSize];
-
-                int bytesRead = 0;
-
-                // This feels hacky, why do we need to do this?
-                // Only 32256 bytes of the file get read unless we
-                // loop through like this. Probably kills performance.
-                /*while (bytesRead < chunk.Length)
-                {
-                    bytesRead += await stream.ReadAsync(chunk, bytesRead, chunk.Length - bytesRead);
-                }*/
-
-                using (FileStream fs = new FileStream(Path.Combine("Upload", Archive.Id.ToString()), FileMode.Append))
-                {
-                    await stream.CopyToAsync(fs, ChunkSize);
-                    //await fs.WriteAsync(chunk);
-                }
-
-                uploadedBytes += ChunkSize;
-                WatchBytesTransferred += ChunkSize;
-
-                Progress = (int)(uploadedBytes * 100 / totalBytes);
-
-                if (Watch.Elapsed.TotalSeconds >= 1)
-                {
-                    Speed = WatchBytesTransferred * (1 / Watch.Elapsed.TotalSeconds);
-                    WatchBytesTransferred = 0;
-                    Watch.Restart();
-                }
-
-                if (Progress >= 100)
-                {
-                    Watch.Stop();
-                    Uploading = false;
-                    await UploadComplete();
-                }
-
-                await InvokeAsync(StateHasChanged);
-            }
-        }
+        Uploading = true;
+
+        var response = (await JS.InvokeAsync<string>("Uploader.Upload", "FileInput"));
+
+        if (Guid.TryParse(response, out var objectKey))
+        {
+            Uploading = false;
+            await UploadComplete(objectKey);
+        }
+        else
+        {
+            await MessageService.Error("Archive failed to upload!");
+        }
+
+        await InvokeAsync(StateHasChanged);
     }
 
-    private async Task UploadComplete()
+    private async Task UploadComplete(Guid objectKey)
     {
-        Archive.ObjectKey = Archive.Id.ToString();
+        Archive.ObjectKey = objectKey.ToString();
         Archive.CompressedSize = File.Size;
 
         await ArchiveService.Add(Archive);
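Note: the JavaScript side of this change, the Uploader.Upload helper invoked through JS.InvokeAsync, is not included in this diff. Below is a minimal sketch of what such a helper could look like, assuming it reads the file from the element with id "FileInput", posts it with fetch to a hypothetical /Upload endpoint, and returns the server's object key as a bare GUID string (which is what Guid.TryParse expects on the Blazor side). The endpoint path and form field name are placeholders, not values taken from this commit.

// Hypothetical wwwroot script sketch; not part of this commit.
window.Uploader = {
    Upload: async function (inputId) {
        // Read the file the user picked in the <InputFile> element.
        const input = document.getElementById(inputId);
        if (!input || input.files.length === 0) return "";

        const data = new FormData();
        data.append("file", input.files[0]);

        // Send the file to the server; the endpoint is assumed to respond
        // with the stored object's key as a plain GUID string.
        const response = await fetch("/Upload", { method: "POST", body: data });
        if (!response.ok) return "";

        return await response.text();
    }
};

Returning an empty string on any failure makes Guid.TryParse fail on the C# side, which routes the component into the MessageService.Error("Archive failed to upload!") branch.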