bisync: deglobalize to fix concurrent runs via rc - fixes #8675

Before this change, bisync used some global variables, which could cause errors
if running multiple concurrent bisync runs through the rc. (Running normally
from the command line was not affected.)

This change deglobalizes those variables so that multiple bisync runs can be
safely run at once, from the same rclone instance.
This commit is contained in:
nielash
2025-07-11 12:11:16 -04:00
parent cc20d93f47
commit 9073d17313
13 changed files with 255 additions and 235 deletions

View File

@@ -176,6 +176,7 @@ var (
// Flag -refresh-times helps with Dropbox tests failing with message // Flag -refresh-times helps with Dropbox tests failing with message
// "src and dst identical but can't set mod time without deleting and re-uploading" // "src and dst identical but can't set mod time without deleting and re-uploading"
argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)") argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
) )
// bisyncTest keeps all test data in a single place // bisyncTest keeps all test data in a single place
@@ -264,6 +265,25 @@ func TestBisyncRemoteRemote(t *testing.T) {
testBisync(t, remote, remote) testBisync(t, remote, remote)
} }
// make sure rc can cope with running concurrent jobs
// TestBisyncConcurrent makes sure rc can cope with running concurrent jobs.
// It runs the "basic" test case twice in parallel via testParallel.
func TestBisyncConcurrent(t *testing.T) {
	// Save the flag *value*, not the pointer: the original code saved the
	// pointer (oldArgTestCase := argTestCase) and then "restored" it with
	// argTestCase = oldArgTestCase, which is a no-op — the pointer never
	// changed, so "basic" leaked into any tests running afterwards.
	oldTestCase := *argTestCase
	*argTestCase = "basic"
	*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
	t.Cleanup(func() {
		*argTestCase = oldTestCase
		*ignoreLogs = false
	})
	t.Run("test1", testParallel)
	t.Run("test2", testParallel)
}

// testParallel is a subtest helper that marks itself parallel so that the
// two t.Run calls above execute concurrently, then runs the normal
// remote-to-remote bisync test.
func testParallel(t *testing.T) {
	t.Parallel()
	TestBisyncRemoteRemote(t)
}
// TestBisync is a test engine for bisync test cases. // TestBisync is a test engine for bisync test cases.
func testBisync(t *testing.T, path1, path2 string) { func testBisync(t *testing.T, path1, path2 string) {
ctx := context.Background() ctx := context.Background()
@@ -1441,6 +1461,9 @@ func (b *bisyncTest) compareResults() int {
resultText := b.mangleResult(b.workDir, file, false) resultText := b.mangleResult(b.workDir, file, false)
if fileType(file) == "log" { if fileType(file) == "log" {
if *ignoreLogs {
continue
}
// save mangled logs so difference is easier on eyes // save mangled logs so difference is easier on eyes
goldenFile := filepath.Join(b.logDir, "mangled.golden.log") goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
resultFile := filepath.Join(b.logDir, "mangled.result.log") resultFile := filepath.Join(b.logDir, "mangled.result.log")

View File

@@ -16,15 +16,17 @@ import (
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
var hashType hash.Type type bisyncCheck = struct {
var fsrc, fdst fs.Fs hashType hash.Type
var fcrypt *crypt.Fs fsrc, fdst fs.Fs
fcrypt *crypt.Fs
}
// WhichCheck determines which CheckFn we should use based on the Fs types // WhichCheck determines which CheckFn we should use based on the Fs types
// It is more robust and accurate than Check because // It is more robust and accurate than Check because
// it will fallback to CryptCheck or DownloadCheck instead of --size-only! // it will fallback to CryptCheck or DownloadCheck instead of --size-only!
// it returns the *operations.CheckOpt with the CheckFn set. // it returns the *operations.CheckOpt with the CheckFn set.
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt { func (b *bisyncRun) WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes()) common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())
@@ -40,32 +42,32 @@ func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.Check
if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) { if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
// if both are crypt or only dst is crypt // if both are crypt or only dst is crypt
hashType = FdstCrypt.UnWrap().Hashes().GetOne() b.check.hashType = FdstCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None { if b.check.hashType != hash.None {
// use cryptcheck // use cryptcheck
fsrc = opt.Fsrc b.check.fsrc = opt.Fsrc
fdst = opt.Fdst b.check.fdst = opt.Fdst
fcrypt = FdstCrypt b.check.fcrypt = FdstCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)") fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = CryptCheckFn opt.Check = b.CryptCheckFn
return opt return opt
} }
} else if srcIsCrypt && !dstIsCrypt { } else if srcIsCrypt && !dstIsCrypt {
// if only src is crypt // if only src is crypt
hashType = FsrcCrypt.UnWrap().Hashes().GetOne() b.check.hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
if hashType != hash.None { if b.check.hashType != hash.None {
// use reverse cryptcheck // use reverse cryptcheck
fsrc = opt.Fdst b.check.fsrc = opt.Fdst
fdst = opt.Fsrc b.check.fdst = opt.Fsrc
fcrypt = FsrcCrypt b.check.fcrypt = FsrcCrypt
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)") fs.Infof(b.check.fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
opt.Check = ReverseCryptCheckFn opt.Check = b.ReverseCryptCheckFn
return opt return opt
} }
} }
// if we've gotten this far, neither check or cryptcheck will work, so use --download // if we've gotten this far, neither check or cryptcheck will work, so use --download
fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)") fs.Infof(b.check.fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
opt.Check = DownloadCheckFn opt.Check = DownloadCheckFn
return opt return opt
} }
@@ -88,17 +90,17 @@ func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool,
} }
// CryptCheckFn is a slightly modified version of CryptCheck // CryptCheckFn is a slightly modified version of CryptCheck
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { func (b *bisyncRun) CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
cryptDst := dst.(*crypt.Object) cryptDst := dst.(*crypt.Object)
underlyingDst := cryptDst.UnWrap() underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, hashType) underlyingHash, err := underlyingDst.Hash(ctx, b.check.hashType)
if err != nil { if err != nil {
return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err) return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
} }
if underlyingHash == "" { if underlyingHash == "" {
return false, true, nil return false, true, nil
} }
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType) cryptHash, err := b.check.fcrypt.ComputeHash(ctx, cryptDst, src, b.check.hashType)
if err != nil { if err != nil {
return true, false, fmt.Errorf("error computing hash: %w", err) return true, false, fmt.Errorf("error computing hash: %w", err)
} }
@@ -106,10 +108,10 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
return false, true, nil return false, true, nil
} }
if cryptHash != underlyingHash { if cryptHash != underlyingHash {
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash) err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", b.check.fdst.Name(), b.check.fdst.Root(), cryptHash, b.check.fsrc.Name(), b.check.fsrc.Root(), underlyingHash)
fs.Debugf(src, "%s", err.Error()) fs.Debugf(src, "%s", err.Error())
// using same error msg as CheckFn so integration tests match // using same error msg as CheckFn so integration tests match
err = fmt.Errorf("%v differ", hashType) err = fmt.Errorf("%v differ", b.check.hashType)
fs.Errorf(src, "%s", err.Error()) fs.Errorf(src, "%s", err.Error())
return true, false, nil return true, false, nil
} }
@@ -118,8 +120,8 @@ func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash
// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched // ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
// result: src is crypt, dst is non-crypt // result: src is crypt, dst is non-crypt
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
return CryptCheckFn(ctx, src, dst) return b.CryptCheckFn(ctx, src, dst)
} }
// DownloadCheckFn is a slightly modified version of Check with --download // DownloadCheckFn is a slightly modified version of Check with --download
@@ -137,7 +139,7 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
if filterCheck.HaveFilesFrom() { if filterCheck.HaveFilesFrom() {
fs.Debugf(nil, "There are potential conflicts to check.") fs.Debugf(nil, "There are potential conflicts to check.")
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2) opt, close, checkopterr := check.GetCheckOpt(fs1, fs2)
if checkopterr != nil { if checkopterr != nil {
b.critical = true b.critical = true
b.retryable = true b.retryable = true
@@ -148,16 +150,16 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
opt.Match = new(bytes.Buffer) opt.Match = new(bytes.Buffer)
opt = WhichCheck(ctxCheck, opt) opt = b.WhichCheck(ctxCheck, opt)
fs.Infof(nil, "Checking potential conflicts...") fs.Infof(nil, "Checking potential conflicts...")
check := operations.CheckFn(ctxCheck, opt) check := operations.CheckFn(ctxCheck, opt)
fs.Infof(nil, "Finished checking the potential conflicts. %s", check) fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
//reset error count, because we don't want to count check errors as bisync errors // reset error count, because we don't want to count check errors as bisync errors
accounting.Stats(ctxCheck).ResetErrors() accounting.Stats(ctxCheck).ResetErrors()
//return the list of identical files to check against later // return the list of identical files to check against later
if len(fmt.Sprint(opt.Match)) > 0 { if len(fmt.Sprint(opt.Match)) > 0 {
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n")) matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
} }
@@ -173,14 +175,14 @@ func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter
// WhichEqual is similar to WhichCheck, but checks a single object. // WhichEqual is similar to WhichCheck, but checks a single object.
// Returns true if the objects are equal, false if they differ or if we don't know // Returns true if the objects are equal, false if they differ or if we don't know
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool { func (b *bisyncRun) WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst) opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
if checkopterr != nil { if checkopterr != nil {
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr) fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
} }
defer close() defer close()
opt = WhichCheck(ctx, opt) opt = b.WhichCheck(ctx, opt)
differ, noHash, err := opt.Check(ctx, dst, src) differ, noHash, err := opt.Check(ctx, dst, src)
if err != nil { if err != nil {
fs.Errorf(src, "failed to check: %v", err) fs.Errorf(src, "failed to check: %v", err)
@@ -217,7 +219,7 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
equal, skipHash = timeSizeEqualFn() equal, skipHash = timeSizeEqualFn()
if equal && !skipHash { if equal && !skipHash {
whichHashType := func(f fs.Info) hash.Type { whichHashType := func(f fs.Info) hash.Type {
ht := getHashType(f.Name()) ht := b.getHashType(f.Name())
if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync { if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
ht = f.Hashes().GetOne() ht = f.Hashes().GetOne()
} }
@@ -225,9 +227,9 @@ func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
} }
srcHash, _ := src.Hash(ctx, whichHashType(src.Fs())) srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs())) dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
srcHash, _ = tryDownloadHash(ctx, src, srcHash) srcHash, _ = b.tryDownloadHash(ctx, src, srcHash)
dstHash, _ = tryDownloadHash(ctx, dst, dstHash) dstHash, _ = b.tryDownloadHash(ctx, dst, dstHash)
equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size()) equal = !b.hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
} }
if equal { if equal {
logger(ctx, operations.Match, src, dst, nil) logger(ctx, operations.Match, src, dst, nil)

View File

@@ -115,6 +115,7 @@ func (x *CheckSyncMode) Type() string {
} }
// Opt keeps command line options // Opt keeps command line options
// internal functions should use b.opt instead
var Opt Options var Opt Options
func init() { func init() {

View File

@@ -28,7 +28,7 @@ type CompareOpt = struct {
DownloadHash bool DownloadHash bool
} }
func (b *bisyncRun) setCompareDefaults(ctx context.Context) error { func (b *bisyncRun) setCompareDefaults(ctx context.Context) (err error) {
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
// defaults // defaults
@@ -120,25 +120,25 @@ func sizeDiffers(a, b int64) bool {
// returns true if the hashes are definitely different. // returns true if the hashes are definitely different.
// returns false if equal, or if either is unknown. // returns false if equal, or if either is unknown.
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool { func (b *bisyncRun) hashDiffers(stringA, stringB string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" { if stringA == "" || stringB == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) { if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), stringA, stringB)
} }
return false return false
} }
if ht1 != ht2 { if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) { if !(b.downloadHashOpt.downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false return false
} }
} }
return a != b return stringA != stringB
} }
// chooses hash type, giving priority to types both sides have in common // chooses hash type, giving priority to types both sides have in common
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) { func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
downloadHash = b.opt.Compare.DownloadHash b.downloadHashOpt.downloadHash = b.opt.Compare.DownloadHash
if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected { if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.") fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
} else { } else {
@@ -268,13 +268,15 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil return nil
} }
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable // b.downloadHashOpt.downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
var downloadHash bool type downloadHashOpt struct {
var downloadHashWarn mutex.Once downloadHash bool
var firstDownloadHash mutex.Once downloadHashWarn mutex.Once
firstDownloadHash mutex.Once
}
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) { func (b *bisyncRun) tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
if hashVal != "" || !downloadHash { if hashVal != "" || !b.downloadHashOpt.downloadHash {
return hashVal, nil return hashVal, nil
} }
obj, ok := o.(fs.Object) obj, ok := o.(fs.Object)
@@ -283,14 +285,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
return hashVal, fs.ErrorObjectNotFound return hashVal, fs.ErrorObjectNotFound
} }
if o.Size() < 0 { if o.Size() < 0 {
downloadHashWarn.Do(func() { b.downloadHashOpt.downloadHashWarn.Do(func() {
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
}) })
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.") fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported return hashVal, hash.ErrUnsupported
} }
firstDownloadHash.Do(func() { b.downloadHashOpt.firstDownloadHash.Do(func() {
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
}) })
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash") tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")

View File

@@ -219,7 +219,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string,
} }
} }
if b.opt.Compare.Checksum { if b.opt.Compare.Checksum {
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) { if b.hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file)) fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash")) whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
d |= deltaHash d |= deltaHash
@@ -346,7 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
if d2.is(deltaOther) { if d2.is(deltaOther) {
// if size or hash differ, skip this, as we already know they're not equal // if size or hash differ, skip this, as we already know they're not equal
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) || if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) { (b.opt.Compare.Checksum && b.hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
fs.Debugf(file, "skipping equality check as size/hash definitely differ") fs.Debugf(file, "skipping equality check as size/hash definitely differ")
} else { } else {
checkit := func(filename string) { checkit := func(filename string) {
@@ -393,10 +393,10 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// if files are identical, leave them alone instead of renaming // if files are identical, leave them alone instead of renaming
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) { if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file) fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
ls1.getPut(file, skippedDirs1) b.march.ls1.getPut(file, skippedDirs1)
ls2.getPut(file, skippedDirs2) b.march.ls2.getPut(file, skippedDirs2)
b.debugFn(file, func() { b.debugFn(file, func() {
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName))) b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, b.march.ls2 has name?: %v", file, b.march.ls1.has(b.DebugName), b.march.ls2.has(b.DebugName)))
}) })
} else { } else {
equal := matches.Has(file) equal := matches.Has(file)
@@ -409,16 +409,16 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (result
// the Path1 version is deemed "correct" in this scenario // the Path1 version is deemed "correct" in this scenario
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file) fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
copy1to2.Add(file) copy1to2.Add(file)
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) { } else if b.opt.Compare.Modtime && timeDiffers(ctx, b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)), b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)") fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) { if b.march.ls1.getTime(b.march.ls1.getTryAlias(file, alias)).Before(b.march.ls2.getTime(b.march.ls2.getTryAlias(file, alias))) {
// Path2 is newer // Path2 is newer
b.indent("Path2", p1, "Queue copy to Path1") b.indent("Path2", p1, "Queue copy to Path1")
copy2to1.Add(ls2.getTryAlias(file, alias)) copy2to1.Add(b.march.ls2.getTryAlias(file, alias))
} else { } else {
// Path1 is newer // Path1 is newer
b.indent("Path1", p2, "Queue copy to Path2") b.indent("Path1", p2, "Queue copy to Path2")
copy1to2.Add(ls1.getTryAlias(file, alias)) copy1to2.Add(b.march.ls1.getTryAlias(file, alias))
} }
} else { } else {
fs.Infof(nil, "Files are equal! Skipping: %s", file) fs.Infof(nil, "Files are equal! Skipping: %s", file)
@@ -590,10 +590,10 @@ func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
fullMap1 := map[string]string{} // [transformedname]originalname fullMap1 := map[string]string{} // [transformedname]originalname
fullMap2 := map[string]string{} // [transformedname]originalname fullMap2 := map[string]string{} // [transformedname]originalname
for _, name := range ls1.list { for _, name := range b.march.ls1.list {
fullMap1[transform(name)] = name fullMap1[transform(name)] = name
} }
for _, name := range ls2.list { for _, name := range b.march.ls2.list {
fullMap2[transform(name)] = name fullMap2[transform(name)] = name
} }

View File

@@ -202,8 +202,8 @@ func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool
equal = false equal = false
} }
} }
if b.opt.Compare.Checksum && !ignoreListingChecksum { if b.opt.Compare.Checksum && !b.queueOpt.ignoreListingChecksum {
if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) { if b.hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2))) b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
equal = false equal = false
} }
@@ -745,7 +745,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
if hashType != hash.None { if hashType != hash.None {
hashVal, _ = obj.Hash(ctxRecheck, hashType) hashVal, _ = obj.Hash(ctxRecheck, hashType)
} }
hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal) hashVal, _ = b.tryDownloadHash(ctxRecheck, obj, hashVal)
} }
var modtime time.Time var modtime time.Time
if b.opt.Compare.Modtime { if b.opt.Compare.Modtime {
@@ -759,7 +759,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
for _, dstObj := range dstObjs { for _, dstObj := range dstObjs {
if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) { if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't. // note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) { if b.opt.DryRun || b.WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
putObj(srcObj, srcList) putObj(srcObj, srcList)
putObj(dstObj, dstList) putObj(dstObj, dstList)
resolved = append(resolved, srcObj.Remote()) resolved = append(resolved, srcObj.Remote())
@@ -773,7 +773,7 @@ func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList,
// skip and error during --resync, as rollback is not possible // skip and error during --resync, as rollback is not possible
if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun { if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
if b.opt.Resync { if b.opt.Resync {
err = errors.New("no dstObj match or files not equal") err := errors.New("no dstObj match or files not equal")
b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false) b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
} else { } else {
toRollback = append(toRollback, srcObj.Remote()) toRollback = append(toRollback, srcObj.Remote())

View File

@@ -16,16 +16,17 @@ import (
const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour) const basicallyforever = fs.Duration(200 * 365 * 24 * time.Hour)
var stopRenewal func() type lockFileOpt struct {
stopRenewal func()
data struct {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}
}
var data = struct { func (b *bisyncRun) setLockFile() (err error) {
Session string
PID string
TimeRenewed time.Time
TimeExpires time.Time
}{}
func (b *bisyncRun) setLockFile() error {
b.lockFile = "" b.lockFile = ""
b.setLockFileExpiration() b.setLockFileExpiration()
if !b.opt.DryRun { if !b.opt.DryRun {
@@ -45,24 +46,23 @@ func (b *bisyncRun) setLockFile() error {
} }
fs.Debugf(nil, "Lock file created: %s", b.lockFile) fs.Debugf(nil, "Lock file created: %s", b.lockFile)
b.renewLockFile() b.renewLockFile()
stopRenewal = b.startLockRenewal() b.lockFileOpt.stopRenewal = b.startLockRenewal()
} }
return nil return nil
} }
func (b *bisyncRun) removeLockFile() { func (b *bisyncRun) removeLockFile() (err error) {
if b.lockFile != "" { if b.lockFile != "" {
stopRenewal() b.lockFileOpt.stopRenewal()
errUnlock := os.Remove(b.lockFile) err = os.Remove(b.lockFile)
if errUnlock == nil { if err == nil {
fs.Debugf(nil, "Lock file removed: %s", b.lockFile) fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
} else if err == nil {
err = errUnlock
} else { } else {
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock) fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, err)
} }
b.lockFile = "" // block removing it again b.lockFile = "" // block removing it again
} }
return err
} }
func (b *bisyncRun) setLockFileExpiration() { func (b *bisyncRun) setLockFileExpiration() {
@@ -77,18 +77,18 @@ func (b *bisyncRun) setLockFileExpiration() {
func (b *bisyncRun) renewLockFile() { func (b *bisyncRun) renewLockFile() {
if b.lockFile != "" && bilib.FileExists(b.lockFile) { if b.lockFile != "" && bilib.FileExists(b.lockFile) {
data.Session = b.basePath b.lockFileOpt.data.Session = b.basePath
data.PID = strconv.Itoa(os.Getpid()) b.lockFileOpt.data.PID = strconv.Itoa(os.Getpid())
data.TimeRenewed = time.Now() b.lockFileOpt.data.TimeRenewed = time.Now()
data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock)) b.lockFileOpt.data.TimeExpires = time.Now().Add(time.Duration(b.opt.MaxLock))
// save data file // save data file
df, err := os.Create(b.lockFile) df, err := os.Create(b.lockFile)
b.handleErr(b.lockFile, "error renewing lock file", err, true, true) b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true) b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(b.lockFileOpt.data), true, true)
b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true) b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
if b.opt.MaxLock < basicallyforever { if b.opt.MaxLock < basicallyforever {
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires) fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, b.lockFileOpt.data.TimeExpires)
} }
} }
} }
@@ -99,7 +99,7 @@ func (b *bisyncRun) lockFileIsExpired() bool {
b.handleErr(b.lockFile, "error reading lock file", err, true, true) b.handleErr(b.lockFile, "error reading lock file", err, true, true)
dec := json.NewDecoder(rdf) dec := json.NewDecoder(rdf)
for { for {
if err := dec.Decode(&data); err != nil { if err := dec.Decode(&b.lockFileOpt.data); err != nil {
if err != io.EOF { if err != io.EOF {
fs.Errorf(b.lockFile, "err: %v", err) fs.Errorf(b.lockFile, "err: %v", err)
} }
@@ -107,14 +107,14 @@ func (b *bisyncRun) lockFileIsExpired() bool {
} }
} }
b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true) b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) { if !b.lockFileOpt.data.TimeExpires.IsZero() && b.lockFileOpt.data.TimeExpires.Before(time.Now()) {
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires) fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), b.lockFileOpt.data.TimeExpires)
markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync) markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
markFailed(b.listing2) markFailed(b.listing2)
return true return true
} }
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second)) fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), b.lockFileOpt.data.TimeExpires, time.Since(b.lockFileOpt.data.TimeExpires).Abs().Round(time.Second))
prettyprint(data, "Lockfile info", fs.LogLevelInfo) prettyprint(b.lockFileOpt.data, "Lockfile info", fs.LogLevelInfo)
} }
return false return false
} }

View File

@@ -12,18 +12,20 @@ import (
"github.com/rclone/rclone/fs/march" "github.com/rclone/rclone/fs/march"
) )
var ls1 = newFileList() type bisyncMarch struct {
var ls2 = newFileList() ls1 *fileList
var err error ls2 *fileList
var firstErr error err error
var marchAliasLock sync.Mutex firstErr error
var marchLsLock sync.Mutex marchAliasLock sync.Mutex
var marchErrLock sync.Mutex marchLsLock sync.Mutex
var marchCtx context.Context marchErrLock sync.Mutex
marchCtx context.Context
}
func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) { func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
marchCtx = ctx b.march.marchCtx = ctx
b.setupListing() b.setupListing()
fs.Debugf(b, "starting to march!") fs.Debugf(b, "starting to march!")
@@ -39,31 +41,31 @@ func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList,
NoCheckDest: false, NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization, NoUnicodeNormalization: ci.NoUnicodeNormalization,
} }
err = m.Run(ctx) b.march.err = m.Run(ctx)
fs.Debugf(b, "march completed. err: %v", err) fs.Debugf(b, "march completed. err: %v", b.march.err)
if err == nil { if b.march.err == nil {
err = firstErr b.march.err = b.march.firstErr
} }
if err != nil { if b.march.err != nil {
b.handleErr("march", "error during march", err, true, true) b.handleErr("march", "error during march", b.march.err, true, true)
b.abort = true b.abort = true
return ls1, ls2, err return b.march.ls1, b.march.ls2, b.march.err
} }
// save files // save files
if b.opt.Compare.DownloadHash && ls1.hash == hash.None { if b.opt.Compare.DownloadHash && b.march.ls1.hash == hash.None {
ls1.hash = hash.MD5 b.march.ls1.hash = hash.MD5
} }
if b.opt.Compare.DownloadHash && ls2.hash == hash.None { if b.opt.Compare.DownloadHash && b.march.ls2.hash == hash.None {
ls2.hash = hash.MD5 b.march.ls2.hash = hash.MD5
} }
err = ls1.save(ctx, b.newListing1) b.march.err = b.march.ls1.save(ctx, b.newListing1)
b.handleErr(ls1, "error saving ls1 from march", err, true, true) b.handleErr(b.march.ls1, "error saving b.march.ls1 from march", b.march.err, true, true)
err = ls2.save(ctx, b.newListing2) b.march.err = b.march.ls2.save(ctx, b.newListing2)
b.handleErr(ls2, "error saving ls2 from march", err, true, true) b.handleErr(b.march.ls2, "error saving b.march.ls2 from march", b.march.err, true, true)
return ls1, ls2, err return b.march.ls1, b.march.ls2, b.march.err
} }
// SrcOnly have an object which is on path1 only // SrcOnly have an object which is on path1 only
@@ -83,9 +85,9 @@ func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
// Match is called when object exists on both path1 and path2 (whether equal or not) // Match is called when object exists on both path1 and path2 (whether equal or not)
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) { func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
fs.Debugf(o1, "both path1 and path2") fs.Debugf(o1, "both path1 and path2")
marchAliasLock.Lock() b.march.marchAliasLock.Lock()
b.aliases.Add(o1.Remote(), o2.Remote()) b.aliases.Add(o1.Remote(), o2.Remote())
marchAliasLock.Unlock() b.march.marchAliasLock.Unlock()
b.parse(o1, true) b.parse(o1, true)
b.parse(o2, false) b.parse(o2, false)
return isDir(o1) return isDir(o1)
@@ -119,76 +121,76 @@ func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
} }
func (b *bisyncRun) setupListing() { func (b *bisyncRun) setupListing() {
ls1 = newFileList() b.march.ls1 = newFileList()
ls2 = newFileList() b.march.ls2 = newFileList()
// note that --ignore-listing-checksum is different from --ignore-checksum // note that --ignore-listing-checksum is different from --ignore-checksum
// and we already checked it when we set b.opt.Compare.HashType1 and 2 // and we already checked it when we set b.opt.Compare.HashType1 and 2
ls1.hash = b.opt.Compare.HashType1 b.march.ls1.hash = b.opt.Compare.HashType1
ls2.hash = b.opt.Compare.HashType2 b.march.ls2.hash = b.opt.Compare.HashType2
} }
func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) { func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1)) tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
defer func() { defer func() {
tr.Done(marchCtx, nil) tr.Done(b.march.marchCtx, nil)
}() }()
var ( var (
hashVal string hashVal string
hashErr error hashErr error
) )
ls := whichLs(isPath1) ls := b.whichLs(isPath1)
hashType := ls.hash hashType := ls.hash
if hashType != hash.None { if hashType != hash.None {
hashVal, hashErr = o.Hash(marchCtx, hashType) hashVal, hashErr = o.Hash(b.march.marchCtx, hashType)
marchErrLock.Lock() b.march.marchErrLock.Lock()
if firstErr == nil { if b.march.firstErr == nil {
firstErr = hashErr b.march.firstErr = hashErr
} }
marchErrLock.Unlock() b.march.marchErrLock.Unlock()
} }
hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal) hashVal, hashErr = b.tryDownloadHash(b.march.marchCtx, o, hashVal)
marchErrLock.Lock() b.march.marchErrLock.Lock()
if firstErr == nil { if b.march.firstErr == nil {
firstErr = hashErr b.march.firstErr = hashErr
} }
if firstErr != nil { if b.march.firstErr != nil {
b.handleErr(hashType, "error hashing during march", firstErr, false, true) b.handleErr(hashType, "error hashing during march", b.march.firstErr, false, true)
} }
marchErrLock.Unlock() b.march.marchErrLock.Unlock()
var modtime time.Time var modtime time.Time
if b.opt.Compare.Modtime { if b.opt.Compare.Modtime {
modtime = o.ModTime(marchCtx).In(TZ) modtime = o.ModTime(b.march.marchCtx).In(TZ)
} }
id := "" // TODO: ID(o) id := "" // TODO: ID(o)
flags := "-" // "-" for a file and "d" for a directory flags := "-" // "-" for a file and "d" for a directory
marchLsLock.Lock() b.march.marchLsLock.Lock()
ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags) ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
marchLsLock.Unlock() b.march.marchLsLock.Unlock()
} }
func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) { func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1)) tr := accounting.Stats(b.march.marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
defer func() { defer func() {
tr.Done(marchCtx, nil) tr.Done(b.march.marchCtx, nil)
}() }()
ls := whichLs(isPath1) ls := b.whichLs(isPath1)
var modtime time.Time var modtime time.Time
if b.opt.Compare.Modtime { if b.opt.Compare.Modtime {
modtime = o.ModTime(marchCtx).In(TZ) modtime = o.ModTime(b.march.marchCtx).In(TZ)
} }
id := "" // TODO id := "" // TODO
flags := "d" // "-" for a file and "d" for a directory flags := "d" // "-" for a file and "d" for a directory
marchLsLock.Lock() b.march.marchLsLock.Lock()
ls.put(o.Remote(), -1, modtime, "", id, flags) ls.put(o.Remote(), -1, modtime, "", id, flags)
marchLsLock.Unlock() b.march.marchLsLock.Unlock()
} }
func whichLs(isPath1 bool) *fileList { func (b *bisyncRun) whichLs(isPath1 bool) *fileList {
ls := ls1 ls := b.march.ls1
if !isPath1 { if !isPath1 {
ls = ls2 ls = b.march.ls2
} }
return ls return ls
} }
@@ -206,7 +208,7 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true) b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true) b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
ci := fs.GetConfig(ctxCheckFile) ci := fs.GetConfig(ctxCheckFile)
marchCtx = ctxCheckFile b.march.marchCtx = ctxCheckFile
b.setupListing() b.setupListing()
fs.Debugf(b, "starting to march!") fs.Debugf(b, "starting to march!")
@@ -223,18 +225,18 @@ func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, e
NoCheckDest: false, NoCheckDest: false,
NoUnicodeNormalization: ci.NoUnicodeNormalization, NoUnicodeNormalization: ci.NoUnicodeNormalization,
} }
err = m.Run(ctxCheckFile) b.march.err = m.Run(ctxCheckFile)
fs.Debugf(b, "march completed. err: %v", err) fs.Debugf(b, "march completed. err: %v", b.march.err)
if err == nil { if b.march.err == nil {
err = firstErr b.march.err = b.march.firstErr
} }
if err != nil { if b.march.err != nil {
b.handleErr("march", "error during findCheckFiles", err, true, true) b.handleErr("march", "error during findCheckFiles", b.march.err, true, true)
b.abort = true b.abort = true
} }
return ls1, ls2, err return b.march.ls1, b.march.ls2, b.march.err
} }
// ID returns the ID of the Object if known, or "" if not // ID returns the ID of the Object if known, or "" if not

View File

@@ -51,6 +51,11 @@ type bisyncRun struct {
lockFile string lockFile string
renames renames renames renames
resyncIs1to2 bool resyncIs1to2 bool
march bisyncMarch
check bisyncCheck
queueOpt bisyncQueueOpt
downloadHashOpt downloadHashOpt
lockFileOpt lockFileOpt
} }
type queues struct { type queues struct {
@@ -64,7 +69,6 @@ type queues struct {
// Bisync handles lock file, performs bisync run and checks exit status // Bisync handles lock file, performs bisync run and checks exit status
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) { func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
defer resetGlobals()
opt := *optArg // ensure that input is never changed opt := *optArg // ensure that input is never changed
b := &bisyncRun{ b := &bisyncRun{
fs1: fs1, fs1: fs1,
@@ -124,6 +128,8 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
return err return err
} }
b.queueOpt.logger = operations.NewLoggerOpt()
// Handle SIGINT // Handle SIGINT
var finaliseOnce gosync.Once var finaliseOnce gosync.Once
@@ -161,7 +167,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
markFailed(b.listing1) markFailed(b.listing1)
markFailed(b.listing2) markFailed(b.listing2)
} }
b.removeLockFile() err = b.removeLockFile()
} }
}) })
} }
@@ -171,7 +177,10 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// run bisync // run bisync
err = b.runLocked(ctx) err = b.runLocked(ctx)
b.removeLockFile() removeLockErr := b.removeLockFile()
if err == nil {
err = removeLockErr
}
b.CleanupCompleted = true b.CleanupCompleted = true
if b.InGracefulShutdown { if b.InGracefulShutdown {
@@ -297,7 +306,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
} }
fs.Infof(nil, "Building Path1 and Path2 listings") fs.Infof(nil, "Building Path1 and Path2 listings")
ls1, ls2, err = b.makeMarchListing(fctx) b.march.ls1, b.march.ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() { if err != nil || accounting.Stats(fctx).Errored() {
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
b.critical = true b.critical = true
@@ -307,7 +316,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path1 deltas relative to the prior sync // Check for Path1 deltas relative to the prior sync
fs.Infof(nil, "Path1 checking for diffs") fs.Infof(nil, "Path1 checking for diffs")
ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1") ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, b.march.ls1, "Path1")
if err != nil { if err != nil {
return err return err
} }
@@ -315,7 +324,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
// Check for Path2 deltas relative to the prior sync // Check for Path2 deltas relative to the prior sync
fs.Infof(nil, "Path2 checking for diffs") fs.Infof(nil, "Path2 checking for diffs")
ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2") ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, b.march.ls2, "Path2")
if err != nil { if err != nil {
return err return err
} }
@@ -389,7 +398,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
newl1, _ := b.loadListing(b.newListing1) newl1, _ := b.loadListing(b.newListing1)
newl2, _ := b.loadListing(b.newListing2) newl2, _ := b.loadListing(b.newListing2)
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName))) b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName))) b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
} }
b.saveOldListings() b.saveOldListings()
// save new listings // save new listings
@@ -553,7 +562,7 @@ func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Cont
return ctx return ctx
} }
func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error { func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) (err error) {
if operations.OverlappingFilterCheck(fctx, fs2, fs1) { if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters.")) err = errors.New(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
return err return err
@@ -586,7 +595,7 @@ func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs)
return nil return nil
} }
func (b *bisyncRun) checkSyntax() error { func (b *bisyncRun) checkSyntax() (err error) {
// check for odd number of quotes in path, usually indicating an escaping issue // check for odd number of quotes in path, usually indicating an escaping issue
path1 := bilib.FsPath(b.fs1) path1 := bilib.FsPath(b.fs1)
path2 := bilib.FsPath(b.fs2) path2 := bilib.FsPath(b.fs2)
@@ -634,25 +643,3 @@ func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
} }
return false return false
} }
// mainly to make sure tests don't interfere with each other when running more than one
func resetGlobals() {
downloadHash = false
logger = operations.NewLoggerOpt()
ignoreListingChecksum = false
ignoreListingModtime = false
hashTypes = nil
queueCI = nil
hashType = 0
fsrc, fdst = nil, nil
fcrypt = nil
Opt = Options{}
once = gosync.Once{}
downloadHashWarn = gosync.Once{}
firstDownloadHash = gosync.Once{}
ls1 = newFileList()
ls2 = newFileList()
err = nil
firstErr = nil
marchCtx = nil
}

View File

@@ -51,19 +51,19 @@ func (rs *ResultsSlice) has(name string) bool {
return false return false
} }
var ( type bisyncQueueOpt struct {
logger = operations.NewLoggerOpt() logger operations.LoggerOpt
lock mutex.Mutex lock mutex.Mutex
once mutex.Once once mutex.Once
ignoreListingChecksum bool ignoreListingChecksum bool
ignoreListingModtime bool ignoreListingModtime bool
hashTypes map[string]hash.Type hashTypes map[string]hash.Type
queueCI *fs.ConfigInfo queueCI *fs.ConfigInfo
) }
// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2 // allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
func getHashType(fname string) hash.Type { func (b *bisyncRun) getHashType(fname string) hash.Type {
ht, ok := hashTypes[fname] ht, ok := b.queueOpt.hashTypes[fname]
if ok { if ok {
return ht return ht
} }
@@ -106,9 +106,9 @@ func altName(name string, src, dst fs.DirEntry) string {
} }
// WriteResults is Bisync's LoggerFn // WriteResults is Bisync's LoggerFn
func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) { func (b *bisyncRun) WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
lock.Lock() b.queueOpt.lock.Lock()
defer lock.Unlock() defer b.queueOpt.lock.Unlock()
opt := operations.GetLoggerOpt(ctx) opt := operations.GetLoggerOpt(ctx)
result := Results{ result := Results{
@@ -131,14 +131,14 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
result.Flags = "-" result.Flags = "-"
if side != nil { if side != nil {
result.Size = side.Size() result.Size = side.Size()
if !ignoreListingModtime { if !b.queueOpt.ignoreListingModtime {
result.Modtime = side.ModTime(ctx).In(TZ) result.Modtime = side.ModTime(ctx).In(TZ)
} }
if !ignoreListingChecksum { if !b.queueOpt.ignoreListingChecksum {
sideObj, ok := side.(fs.ObjectInfo) sideObj, ok := side.(fs.ObjectInfo)
if ok { if ok {
result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name())) result.Hash, _ = sideObj.Hash(ctx, b.getHashType(sideObj.Fs().Name()))
result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash) result.Hash, _ = b.tryDownloadHash(ctx, sideObj, result.Hash)
} }
} }
@@ -159,8 +159,8 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
} }
prettyprint(result, "writing result", fs.LogLevelDebug) prettyprint(result, "writing result", fs.LogLevelDebug)
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) { if result.Size < 0 && result.Flags != "d" && ((b.queueOpt.queueCI.CheckSum && !b.downloadHashOpt.downloadHash) || b.queueOpt.queueCI.SizeOnly) {
once.Do(func() { b.queueOpt.once.Do(func() {
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
}) })
} }
@@ -189,14 +189,14 @@ func ReadResults(results io.Reader) []Results {
// for setup code shared by both fastCopy and resyncDir // for setup code shared by both fastCopy and resyncDir
func (b *bisyncRun) preCopy(ctx context.Context) context.Context { func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
queueCI = fs.GetConfig(ctx) b.queueOpt.queueCI = fs.GetConfig(ctx)
ignoreListingChecksum = b.opt.IgnoreListingChecksum b.queueOpt.ignoreListingChecksum = b.opt.IgnoreListingChecksum
ignoreListingModtime = !b.opt.Compare.Modtime b.queueOpt.ignoreListingModtime = !b.opt.Compare.Modtime
hashTypes = map[string]hash.Type{ b.queueOpt.hashTypes = map[string]hash.Type{
b.fs1.Name(): b.opt.Compare.HashType1, b.fs1.Name(): b.opt.Compare.HashType1,
b.fs2.Name(): b.opt.Compare.HashType2, b.fs2.Name(): b.opt.Compare.HashType2,
} }
logger.LoggerFn = WriteResults b.queueOpt.logger.LoggerFn = b.WriteResults
overridingEqual := false overridingEqual := false
if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash { if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
overridingEqual = true overridingEqual = true
@@ -209,15 +209,15 @@ func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
fs.Debugf(nil, "overriding equal") fs.Debugf(nil, "overriding equal")
ctx = b.EqualFn(ctx) ctx = b.EqualFn(ctx)
} }
ctxCopyLogger := operations.WithSyncLogger(ctx, logger) ctxCopyLogger := operations.WithSyncLogger(ctx, b.queueOpt.logger)
if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected { if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
// set here in case !b.opt.Compare.Modtime // set here in case !b.opt.Compare.Modtime
queueCI = fs.GetConfig(ctxCopyLogger) b.queueOpt.queueCI = fs.GetConfig(ctxCopyLogger)
if b.opt.Compare.NoSlowHash { if b.opt.Compare.NoSlowHash {
queueCI.CheckSum = false b.queueOpt.queueCI.CheckSum = false
} }
if b.opt.Compare.SlowHashSyncOnly && !overridingEqual { if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
queueCI.CheckSum = true b.queueOpt.queueCI.CheckSum = true
} }
} }
return ctxCopyLogger return ctxCopyLogger
@@ -250,9 +250,9 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy) ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
b.testFn() b.testFn()
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs) err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
prettyprint(logger, "logger", fs.LogLevelDebug) prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
getResults := ReadResults(logger.JSON) getResults := ReadResults(b.queueOpt.logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName) fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)
lineFormat := "%s %8d %s %s %s %q\n" lineFormat := "%s %8d %s %s %s %q\n"
@@ -292,9 +292,9 @@ func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results,
ctx = b.preCopy(ctx) ctx = b.preCopy(ctx)
err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs) err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
prettyprint(logger, "logger", fs.LogLevelDebug) prettyprint(b.queueOpt.logger, "b.queueOpt.logger", fs.LogLevelDebug)
getResults := ReadResults(logger.JSON) getResults := ReadResults(b.queueOpt.logger.JSON)
fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync") fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")
return getResults, err return getResults, err

View File

@@ -135,7 +135,7 @@ type namePair struct {
newName string newName string
} }
func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error { func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) (err error) {
winningPath := 0 winningPath := 0
if b.opt.ConflictResolve != PreferNone { if b.opt.ConflictResolve != PreferNone {
winningPath = b.conflictWinner(ds1, ds2, file, alias) winningPath = b.conflictWinner(ds1, ds2, file, alias)
@@ -261,15 +261,15 @@ func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName
func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int { func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
for i := startnum; i < math.MaxInt; i++ { for i := startnum; i < math.MaxInt; i++ {
iStr := fmt.Sprint(i) iStr := fmt.Sprint(i)
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) && !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) { !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
// make sure it still holds true with suffixes switched (it should) // make sure it still holds true with suffixes switched (it should)
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) && if !b.march.ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) && !b.march.ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) && !b.march.ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) { !b.march.ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
fs.Debugf(file, "The first available suffix is: %s", iStr) fs.Debugf(file, "The first available suffix is: %s", iStr)
return i return i
} }
@@ -280,10 +280,10 @@ func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias stri
// like numerate, but consider only one side's suffix (for when suffixes are different) // like numerate, but consider only one side's suffix (for when suffixes are different)
func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int { func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
lsA, lsB := ls1, ls2 lsA, lsB := b.march.ls1, b.march.ls2
suffix := b.opt.ConflictSuffix1 suffix := b.opt.ConflictSuffix1
if path == 2 { if path == 2 {
lsA, lsB = ls2, ls1 lsA, lsB = b.march.ls2, b.march.ls1
suffix = b.opt.ConflictSuffix2 suffix = b.opt.ConflictSuffix2
} }
for i := startnum; i < math.MaxInt; i++ { for i := startnum; i < math.MaxInt; i++ {
@@ -299,7 +299,7 @@ func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alia
return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
} }
func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error { func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) (err error) {
if winningPath == thisPathNum { if winningPath == thisPathNum {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum)) b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
} else { } else {
@@ -321,7 +321,7 @@ func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath,
return nil return nil
} }
func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error { func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) (err error) {
skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete") skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
if !skip { if !skip {
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum)) b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))

View File

@@ -41,12 +41,12 @@ func (b *bisyncRun) setResyncDefaults() {
// It will generate path1 and path2 listings, // It will generate path1 and path2 listings,
// copy any unique files to the opposite path, // copy any unique files to the opposite path,
// and resolve any differing files according to the --resync-mode. // and resolve any differing files according to the --resync-mode.
func (b *bisyncRun) resync(octx, fctx context.Context) error { func (b *bisyncRun) resync(octx, fctx context.Context) (err error) {
fs.Infof(nil, "Copying Path2 files to Path1") fs.Infof(nil, "Copying Path2 files to Path1")
// Save blank filelists (will be filled from sync results) // Save blank filelists (will be filled from sync results)
var ls1 = newFileList() ls1 := newFileList()
var ls2 = newFileList() ls2 := newFileList()
err = ls1.save(fctx, b.newListing1) err = ls1.save(fctx, b.newListing1)
if err != nil { if err != nil {
b.handleErr(ls1, "error saving ls1 from resync", err, true, true) b.handleErr(ls1, "error saving ls1 from resync", err, true, true)

View File

@@ -1815,6 +1815,9 @@ about _Unison_ and synchronization in general.
## Changelog ## Changelog
### `v1.71`
* Fixed an issue causing errors when running concurrent bisync runs through the `rc`.
### `v1.69.1` ### `v1.69.1`
* Fixed an issue causing listings to not capture concurrent modifications under certain conditions * Fixed an issue causing listings to not capture concurrent modifications under certain conditions