@@ -38,8 +38,10 @@ import (
 
 	"github.com/cenkalti/backoff"
 	"github.com/dustin/go-humanize"
+	"github.com/k0kubun/go-ansi"
 	"github.com/miolini/datacounter"
 	"github.com/nightlyone/lockfile"
+	"github.com/schollz/progressbar/v3"
 	"golang.org/x/sync/errgroup"
 
 	"github.com/someone1/zfsbackup-go/backends"
@@ -320,6 +322,7 @@ func Backup(pctx context.Context, jobInfo *files.JobInfo) error {
 
 	// Prepare backends and setup plumbing
 	for _, destination := range jobInfo.Destinations {
+		log.AppLogger.Infof("Initializing backend for destination %s", destination)
 		backend, berr := prepareBackend(ctx, jobInfo, destination, uploadBuffer)
 		if berr != nil {
 			log.AppLogger.Errorf("Could not initialize backend due to error - %v.", berr)
@@ -416,16 +419,19 @@ func Backup(pctx context.Context, jobInfo *files.JobInfo) error {
 	} else {
 		fmt.Fprintf(
 			config.Stdout,
-			"Done.\n\tTotal ZFS Stream Bytes: %d (%s)\n\tTotal Bytes Written: %d (%s)\n\tElapsed Time: %v\n\tTotal Files Uploaded: %d\n",
+			"Done.\n\tTotal ZFS Stream Bytes: %d (%s)\n\tTotal Bytes Written: %d (%s)\n\tElapsed Time: %v\n\tTotal Files Uploaded: %d\n\tAverage Upload Rate: %s\n",
 			jobInfo.ZFSStreamBytes,
 			humanize.IBytes(jobInfo.ZFSStreamBytes),
 			totalWrittenBytes,
 			humanize.IBytes(totalWrittenBytes),
 			time.Since(jobInfo.StartTime),
 			len(jobInfo.Volumes)+1,
+			fmt.Sprintf("%.2f TB/hr", float64(totalWrittenBytes)/1e12/time.Since(jobInfo.StartTime).Hours()),
 		)
 	}
 
+	fmt.Printf("Backup of %s completed successfully.\n", jobInfo.VolumeName)
+
 	log.AppLogger.Debugf("Cleaning up resources...")
 
 	for _, backend := range usedBackends {
@@ -478,6 +484,7 @@ func saveManifest(ctx context.Context, j *files.JobInfo, final bool) (*files.Vol
 	}
 
 // nolint:funlen,gocyclo // Difficult to break this apart
+
 func sendStream(ctx context.Context, j *files.JobInfo, c chan<- *files.VolumeInfo, buffer <-chan bool) error {
 	var group *errgroup.Group
 	group, ctx = errgroup.WithContext(ctx)
@@ -493,6 +500,39 @@ func sendStream(ctx context.Context, j *files.JobInfo, c chan<- *files.VolumeInf
 		usingPipe = true
 	}
 
+	// Get total dataset size for progress tracking
+	totalSize, err := zfs.GetDatasetSize(ctx, j.VolumeName)
+	if err != nil {
+		return err
+	}
+
+	// Initialize progress bar
+	bar := progressbar.NewOptions64(int64(totalSize),
+		progressbar.OptionSetWriter(ansi.NewAnsiStdout()),
+		progressbar.OptionEnableColorCodes(true),
+		progressbar.OptionShowBytes(true),
+		progressbar.OptionSetWidth(50),
+		progressbar.OptionThrottle(65*time.Millisecond),
+		progressbar.OptionShowCount(),
+		progressbar.OptionSetDescription("[cyan]Backing up...[reset]"),
+		progressbar.OptionOnCompletion(func() {
+			fmt.Fprint(ansi.NewAnsiStdout(), "\n")
+		}),
+		progressbar.OptionSpinnerType(14),
+		progressbar.OptionFullWidth(),
+		progressbar.OptionSetTheme(progressbar.Theme{
+			Saucer:        "[green]=[reset]",
+			SaucerHead:    "[green]>[reset]",
+			SaucerPadding: " ",
+			BarStart:      "[",
+			BarEnd:        "]",
+		}),
+	)
+
+	// Initialize chunk tracking variables
+	totalChunks := int(totalSize / (j.VolumeSize * humanize.MiByte))
+	var processedChunks int
+
 	group.Go(func() error {
 		var lastTotalBytes uint64
 		defer close(c)
@@ -527,6 +567,8 @@ func sendStream(ctx context.Context, j *files.JobInfo, c chan<- *files.VolumeInf
 					if !usingPipe {
 						c <- volume
 					}
+					processedChunks++
+					bar.Describe(fmt.Sprintf("Backing up... (%d/%d chunks)", processedChunks, totalChunks))
 				}
 				<-buffer
 				volume, err = files.CreateBackupVolume(ctx, j, volNum)
@@ -542,7 +584,7 @@ func sendStream(ctx context.Context, j *files.JobInfo, c chan<- *files.VolumeInf
 			}
 
 			// Write a little at a time and break the output between volumes as needed
-			_, ierr := io.CopyN(volume, counter, files.BufferSize*2)
+			bytesWritten, ierr := io.CopyN(volume, counter, files.BufferSize*2)
 			if ierr == io.EOF {
 				// We are done!
 				log.AppLogger.Debugf("Finished creating volume %s", volume.ObjectName)
@@ -554,17 +596,21 @@ func sendStream(ctx context.Context, j *files.JobInfo, c chan<- *files.VolumeInf
 				if !usingPipe {
 					c <- volume
 				}
+				processedChunks++
+				bar.Describe(fmt.Sprintf("Backing up... (%d/%d chunks)", processedChunks, totalChunks))
 				return nil
 			} else if ierr != nil {
 				log.AppLogger.Errorf("Error while trying to read from the zfs stream for volume %s - %v", volume.ObjectName, ierr)
 				return ierr
 			}
+			// Update progress bar
+			bar.Add64(int64(bytesWritten))
 		}
 	})
 
 	// Start the zfs send command
 	log.AppLogger.Infof("Starting zfs send command: %s", strings.Join(cmd.Args, " "))
-	err := cmd.Start()
+	err = cmd.Start()
 	if err != nil {
 		log.AppLogger.Errorf("Error starting zfs command - %v", err)
 		return err