Remove github.com/pkg/errors and replace with std library version

This is possible now that we no longer support go1.12, and it brings
rclone into line with standard practice in the Go world.

This also removes errors.New and errors.Errorf from lib/errors, so the
stdlib errors package is now preferred over lib/errors.
Nick Craig-Wood
2021-11-04 10:12:57 +00:00
parent 97328e5755
commit e43b5ce5e5
233 changed files with 1673 additions and 1695 deletions
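
The conversion is mechanical: errors.New and errors.Errorf become the stdlib errors.New and fmt.Errorf, while errors.Wrap, errors.Wrapf and errors.WithMessage become fmt.Errorf with a trailing ": %w" so the cause stays on the error chain. A minimal sketch of the pattern, using a hypothetical openFile helper (not code from this commit):

package main

import (
	"errors"
	"fmt"
	"os"
)

// openFile is a hypothetical helper illustrating the translation
// applied throughout this commit; it is not part of the rclone tree.
func openFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		// Before: return errors.Wrapf(err, "failed to open %q", path)
		// After: fmt.Errorf with %w keeps err on the error chain,
		// just as pkg/errors.Wrapf did.
		return fmt.Errorf("failed to open %q: %w", path, err)
	}
	return f.Close()
}

func main() {
	err := openFile("/no/such/file")
	// Code that used errors.Cause from pkg/errors can use the stdlib
	// errors.Is / errors.As instead - the %w chain is compatible.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true
}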

View File

@@ -5,6 +5,7 @@ package cache
import (
"context"
"errors"
"fmt"
"io"
"math"
@@ -19,7 +20,6 @@ import (
"syscall"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
@@ -356,7 +356,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
return nil, err
}
if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
return nil, errors.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
return nil, fmt.Errorf("don't set cache-chunk-total-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
}
@@ -366,13 +366,13 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
rpath, err := parseRootPath(rootPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
return nil, fmt.Errorf("failed to clean root path %q: %w", rootPath, err)
}
remotePath := fspath.JoinRootPath(opt.Remote, rootPath)
wrappedFs, wrapErr := cache.Get(ctx, remotePath)
if wrapErr != nil && wrapErr != fs.ErrorIsFile {
return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remotePath, wrapErr)
}
var fsErr error
fs.Debugf(name, "wrapped %v:%v at root %v", wrappedFs.Name(), wrappedFs.Root(), rpath)
@@ -401,7 +401,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if opt.PlexToken != "" {
f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken, opt.PlexInsecure)
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
} else {
if opt.PlexPassword != "" && opt.PlexUsername != "" {
@@ -413,7 +413,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
m.Set("plex_token", token)
})
if err != nil {
return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
return nil, fmt.Errorf("failed to connect to the Plex API %v: %w", opt.PlexURL, err)
}
}
}
@@ -434,11 +434,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
}
err = os.MkdirAll(dbPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", dbPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", dbPath, err)
}
err = os.MkdirAll(chunkPath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", chunkPath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", chunkPath, err)
}
dbPath = filepath.Join(dbPath, name+".db")
@@ -450,7 +450,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
DbWaitTime: time.Duration(opt.DbWaitTime),
})
if err != nil {
return nil, errors.Wrapf(err, "failed to start cache db")
return nil, fmt.Errorf("failed to start cache db: %w", err)
}
// Trap SIGINT and SIGTERM to close the DB handle gracefully
c := make(chan os.Signal, 1)
@@ -484,12 +484,12 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
if f.opt.TempWritePath != "" {
err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
if err != nil {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
return nil, fmt.Errorf("failed to create cache directory %v: %w", f.opt.TempWritePath, err)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
return nil, fmt.Errorf("failed to create temp fs: %v: %w", err, err)
}
fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
@@ -606,7 +606,7 @@ func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err er
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
return out, errors.Errorf("error while getting cache stats")
return out, fmt.Errorf("error while getting cache stats")
}
out["status"] = "ok"
out["stats"] = m
@@ -633,7 +633,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
return out, errors.Errorf("remote is needed")
return out, fmt.Errorf("remote is needed")
}
remote := remoteInt.(string)
withData := false
@@ -644,7 +644,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
remote = f.unwrapRemote(remote)
if !f.cache.HasEntry(path.Join(f.Root(), remote)) {
return out, errors.Errorf("%s doesn't exist in cache", remote)
return out, fmt.Errorf("%s doesn't exist in cache", remote)
}
co := NewObject(f, remote)
@@ -653,7 +653,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
cd := NewDirectory(f, remote)
err := f.cache.ExpireDir(cd)
if err != nil {
return out, errors.WithMessage(err, "error expiring directory")
return out, fmt.Errorf("error expiring directory: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(cd.Remote(), fs.EntryDirectory)
@@ -664,7 +664,7 @@ func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params,
// expire the entry
err = f.cache.ExpireObject(co, withData)
if err != nil {
return out, errors.WithMessage(err, "error expiring file")
return out, fmt.Errorf("error expiring file: %w", err)
}
// notify vfs too
f.notifyChangeUpstream(co.Remote(), fs.EntryObject)
@@ -685,24 +685,24 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
case 1:
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
end = start + 1
case 2:
if ints[0] != "" {
start, err = strconv.ParseInt(ints[0], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
if ints[1] != "" {
end, err = strconv.ParseInt(ints[1], 10, 64)
if err != nil {
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
}
default:
return nil, errors.Errorf("invalid range: %q", part)
return nil, fmt.Errorf("invalid range: %q", part)
}
crs = append(crs, chunkRange{start: start, end: end})
}
@@ -757,18 +757,18 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
delete(in, "chunks")
crs, err := parseChunks(s)
if err != nil {
return nil, errors.Wrap(err, "invalid chunks parameter")
return nil, fmt.Errorf("invalid chunks parameter: %w", err)
}
var files [][2]string
for k, v := range in {
if !strings.HasPrefix(k, "file") {
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
switch v := v.(type) {
case string:
files = append(files, [2]string{v, f.unwrapRemote(v)})
default:
return nil, errors.Errorf("invalid parameter %s=%s", k, v)
return nil, fmt.Errorf("invalid parameter %s=%s", k, v)
}
}
type fileStatus struct {
@@ -1124,7 +1124,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
return fmt.Errorf("Unknown object type %T", entry)
}
}
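
One subtlety visible in the NewFs hunk above: the check wrapErr != fs.ErrorIsFile compares with == and only works while cache.Get returns that sentinel unwrapped. Once call sites wrap with %w, errors.Is is the robust form. A hedged sketch (the sentinel value is re-declared locally purely for illustration):

package main

import (
	"errors"
	"fmt"
)

// Local stand-in for the fs.ErrorIsFile sentinel, for illustration only.
var ErrorIsFile = errors.New("is a file not a directory")

func main() {
	wrapped := fmt.Errorf("failed to make remote %q to wrap: %w", "remote:path", ErrorIsFile)
	fmt.Println(wrapped == ErrorIsFile)          // false: == only sees the outer error
	fmt.Println(errors.Is(wrapped, ErrorIsFile)) // true: Is walks the %w chain
}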

View File

@@ -7,6 +7,7 @@ import (
"bytes"
"context"
"encoding/base64"
"errors"
goflag "flag"
"fmt"
"io"
@@ -22,7 +23,6 @@ import (
"testing"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/cache"
"github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
@@ -446,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
return err
}
if coSize != expectedSize {
return errors.Errorf("%v <> %v", coSize, expectedSize)
return fmt.Errorf("%v <> %v", coSize, expectedSize)
}
return nil
}, 12, time.Second*10)
@@ -502,7 +502,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 2 {
log.Printf("not expected listing /test: %v", li)
return errors.Errorf("not expected listing /test: %v", li)
return fmt.Errorf("not expected listing /test: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/one")
@@ -512,7 +512,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 0 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
li, err = runInstance.list(t, rootFs, "test/second")
@@ -522,21 +522,21 @@ func TestInternalMoveWithNotify(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/second: %v", li)
return errors.Errorf("not expected listing /test/second: %v", li)
return fmt.Errorf("not expected listing /test/second: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "data.bin" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/second/data.bin" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing: %v", li)
@@ -591,17 +591,17 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
if !found {
log.Printf("not found /test")
return errors.Errorf("not found /test")
return fmt.Errorf("not found /test")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
if !found {
log.Printf("not found /test/one")
return errors.Errorf("not found /test/one")
return fmt.Errorf("not found /test/one")
}
found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
if !found {
log.Printf("not found /test/one/test2")
return errors.Errorf("not found /test/one/test2")
return fmt.Errorf("not found /test/one/test2")
}
li, err := runInstance.list(t, rootFs, "test/one")
if err != nil {
@@ -610,21 +610,21 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
}
if len(li) != 1 {
log.Printf("not expected listing /test/one: %v", li)
return errors.Errorf("not expected listing /test/one: %v", li)
return fmt.Errorf("not expected listing /test/one: %v", li)
}
if fi, ok := li[0].(os.FileInfo); ok {
if fi.Name() != "test2" {
log.Printf("not expected name: %v", fi.Name())
return errors.Errorf("not expected name: %v", fi.Name())
return fmt.Errorf("not expected name: %v", fi.Name())
}
} else if di, ok := li[0].(fs.DirEntry); ok {
if di.Remote() != "test/one/test2" {
log.Printf("not expected remote: %v", di.Remote())
return errors.Errorf("not expected remote: %v", di.Remote())
return fmt.Errorf("not expected remote: %v", di.Remote())
}
} else {
log.Printf("unexpected listing: %v", li)
return errors.Errorf("unexpected listing: %v", li)
return fmt.Errorf("unexpected listing: %v", li)
}
log.Printf("complete listing /test/one/test2")
return nil
@@ -1062,7 +1062,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
checkSample = r.readDataFromObj(t, co, offset, end, noLengthCheck)
if !noLengthCheck && size != int64(len(checkSample)) {
return checkSample, errors.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
return checkSample, fmt.Errorf("read size doesn't match expected: %v <> %v", len(checkSample), size)
}
return checkSample, nil
}
@@ -1257,7 +1257,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
case state = <-buCh:
// continue
case <-time.After(maxDuration):
waitCh <- errors.Errorf("Timed out waiting for background upload: %v", remote)
waitCh <- fmt.Errorf("Timed out waiting for background upload: %v", remote)
return
}
checkRemote := state.Remote
@@ -1274,7 +1274,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
return
}
}
waitCh <- errors.Errorf("Too many attempts to wait for the background upload: %v", remote)
waitCh <- fmt.Errorf("Too many attempts to wait for the background upload: %v", remote)
}()
return waitCh
}
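
A side note on the test conversions above: errors.Errorf with a constant message is mapped to fmt.Errorf for a minimal diff, but plain errors.New is the more idiomatic stdlib spelling when there is nothing to format. A small illustrative comparison:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Equivalent errors for a constant message; errors.New avoids
	// going through a format string at all.
	err1 := fmt.Errorf("not found /test")
	err2 := errors.New("not found /test")
	fmt.Println(err1.Error() == err2.Error()) // true
}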

View File

@@ -5,6 +5,7 @@ package cache
import (
"context"
"errors"
"fmt"
"io"
"path"
@@ -13,7 +14,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
)
@@ -243,7 +243,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
return nil, io.ErrUnexpectedEOF
}
return nil, errors.Errorf("chunk not found %v", chunkStart)
return nil, fmt.Errorf("chunk not found %v", chunkStart)
}
// first chunk will be aligned with the start
@@ -323,7 +323,7 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
err = fmt.Errorf("cache: unimplemented seek whence %v", whence)
}
chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))

View File

@@ -5,12 +5,12 @@ package cache
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -178,10 +178,14 @@ func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
}
if o.isTempFile() {
liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
if err != nil {
err = fmt.Errorf("in parent fs %v: %w", o.ParentFs, err)
}
} else {
liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
if err != nil {
err = fmt.Errorf("in cache fs %v: %w", o.CacheFs.Fs, err)
}
}
if err != nil {
fs.Errorf(o, "error refreshing object in : %v", err)
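
The refreshFromSource hunk above is one of the few places where the translation is not line-for-line: errors.Wrapf(nil, ...) returns nil, while fmt.Errorf always returns a non-nil error, so the stdlib version needs the explicit if err != nil guard. A minimal sketch of the difference, using a hypothetical wrap helper:

package main

import "fmt"

// wrap mirrors the guarded pattern added in the hunk above. Unlike
// errors.Wrapf, fmt.Errorf never returns nil, so wrapping must be
// skipped when there is no error. Hypothetical helper for illustration.
func wrap(err error, parent string) error {
	if err != nil {
		err = fmt.Errorf("in parent fs %v: %w", parent, err)
	}
	return err
}

func main() {
	fmt.Println(wrap(nil, "parent"))                // <nil>
	fmt.Println(wrap(fmt.Errorf("boom"), "parent")) // in parent fs parent: boom
}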
@@ -253,7 +257,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't update", o)
return fmt.Errorf("%v is currently uploading, can't update", o)
}
}
fs.Debugf(o, "updating object contents with size %v", src.Size())
@@ -292,7 +296,7 @@ func (o *Object) Remove(ctx context.Context) error {
defer o.CacheFs.backgroundRunner.play()
// don't allow started uploads
if o.isTempFile() && o.tempFileStartedUpload() {
return errors.Errorf("%v is currently uploading, can't delete", o)
return fmt.Errorf("%v is currently uploading, can't delete", o)
}
}
err := o.Object.Remove(ctx)

View File

@@ -4,12 +4,12 @@
package cache
import (
"fmt"
"strconv"
"strings"
"time"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
@@ -53,7 +53,7 @@ func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
return data, nil
}
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
return nil, fmt.Errorf("couldn't get cached object data at offset %v", offset)
}
// AddChunk adds a new chunk of a cached object

View File

@@ -17,7 +17,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
bolt "go.etcd.io/bbolt"
@@ -120,11 +119,11 @@ func (b *Persistent) connect() error {
err = os.MkdirAll(b.dataPath, os.ModePerm)
if err != nil {
return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
return fmt.Errorf("failed to create a data directory %q: %w", b.dataPath, err)
}
b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
if err != nil {
return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
return fmt.Errorf("failed to open a cache connection to %q: %w", b.dbPath, err)
}
if b.features.PurgeDb {
b.Purge()
@@ -176,7 +175,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(remote, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", remote)
return fmt.Errorf("couldn't open bucket (%v)", remote)
}
data := bucket.Get([]byte("."))
@@ -184,7 +183,7 @@ func (b *Persistent) GetDir(remote string) (*Directory, error) {
return json.Unmarshal(data, cd)
}
return errors.Errorf("%v not found", remote)
return fmt.Errorf("%v not found", remote)
})
return cd, err
@@ -209,7 +208,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
bucket = b.getBucket(cachedDirs[0].Dir, true, tx)
}
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
return fmt.Errorf("couldn't open bucket (%v)", cachedDirs[0].Dir)
}
for _, cachedDir := range cachedDirs {
@@ -226,7 +225,7 @@ func (b *Persistent) AddBatchDir(cachedDirs []*Directory) error {
encoded, err := json.Marshal(cachedDir)
if err != nil {
return errors.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
return fmt.Errorf("couldn't marshal object (%v): %v", cachedDir, err)
}
err = b.Put([]byte("."), encoded)
if err != nil {
@@ -244,17 +243,17 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedDir.abs(), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", cachedDir.abs())
return fmt.Errorf("couldn't open bucket (%v)", cachedDir.abs())
}
val := bucket.Get([]byte("."))
if val != nil {
err := json.Unmarshal(val, cachedDir)
if err != nil {
return errors.Errorf("error during unmarshalling obj: %v", err)
return fmt.Errorf("error during unmarshalling obj: %v", err)
}
} else {
return errors.Errorf("missing cached dir: %v", cachedDir)
return fmt.Errorf("missing cached dir: %v", cachedDir)
}
c := bucket.Cursor()
@@ -269,7 +268,7 @@ func (b *Persistent) GetDirEntries(cachedDir *Directory) (fs.DirEntries, error)
// we try to find a cached meta for the dir
currentBucket := c.Bucket().Bucket(k)
if currentBucket == nil {
return errors.Errorf("couldn't open bucket (%v)", string(k))
return fmt.Errorf("couldn't open bucket (%v)", string(k))
}
metaKey := currentBucket.Get([]byte("."))
@@ -318,7 +317,7 @@ func (b *Persistent) RemoveDir(fp string) error {
err = b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open bucket (%v)", fp)
return fmt.Errorf("couldn't open bucket (%v)", fp)
}
// delete the cached dir
err := bucket.DeleteBucket([]byte(cleanPath(dirName)))
@@ -378,13 +377,13 @@ func (b *Persistent) GetObject(cachedObject *Object) (err error) {
return b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject.Dir)
}
val := bucket.Get([]byte(cachedObject.Name))
if val != nil {
return json.Unmarshal(val, cachedObject)
}
return errors.Errorf("couldn't find object (%v)", cachedObject.Name)
return fmt.Errorf("couldn't find object (%v)", cachedObject.Name)
})
}
@@ -393,16 +392,16 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cachedObject.Dir, true, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cachedObject)
return fmt.Errorf("couldn't open parent bucket for %v", cachedObject)
}
// cache Object Info
encoded, err := json.Marshal(cachedObject)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
}
err = bucket.Put([]byte(cachedObject.Name), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
}
return nil
})
@@ -414,7 +413,7 @@ func (b *Persistent) RemoveObject(fp string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket := b.getBucket(cleanPath(parentDir), false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
return fmt.Errorf("couldn't open parent bucket for %v", cleanPath(parentDir))
}
err := bucket.Delete([]byte(cleanPath(objName)))
if err != nil {
@@ -446,7 +445,7 @@ func (b *Persistent) HasEntry(remote string) bool {
err := b.db.View(func(tx *bolt.Tx) error {
bucket := b.getBucket(dir, false, tx)
if bucket == nil {
return errors.Errorf("couldn't open parent bucket for %v", remote)
return fmt.Errorf("couldn't open parent bucket for %v", remote)
}
if f := bucket.Bucket([]byte(name)); f != nil {
return nil
@@ -455,7 +454,7 @@ func (b *Persistent) HasEntry(remote string) bool {
return nil
}
return errors.Errorf("couldn't find object (%v)", remote)
return fmt.Errorf("couldn't find object (%v)", remote)
})
if err == nil {
return true
@@ -555,7 +554,7 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
err := b.db.Update(func(tx *bolt.Tx) error {
dataTsBucket := tx.Bucket([]byte(DataTsBucket))
if dataTsBucket == nil {
return errors.Errorf("Couldn't open (%v) bucket", DataTsBucket)
return fmt.Errorf("Couldn't open (%v) bucket", DataTsBucket)
}
// iterate through ts
c := dataTsBucket.Cursor()
@@ -733,7 +732,7 @@ func (b *Persistent) GetChunkTs(path string, offset int64) (time.Time, error) {
return nil
}
}
return errors.Errorf("not found %v-%v", path, offset)
return fmt.Errorf("not found %v-%v", path, offset)
})
return t, err
@@ -773,7 +772,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
tempObj := &tempUploadInfo{
DestPath: destPath,
@@ -784,11 +783,11 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
return nil
@@ -803,7 +802,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
err = b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -836,7 +835,7 @@ func (b *Persistent) getPendingUpload(inRoot string, waitTime time.Duration) (de
return nil
}
return errors.Errorf("no pending upload found")
return fmt.Errorf("no pending upload found")
})
return destPath, err
@@ -847,14 +846,14 @@ func (b *Persistent) SearchPendingUpload(remote string) (started bool, err error
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
started = tempObj.Started
@@ -869,7 +868,7 @@ func (b *Persistent) searchPendingUploadFromDir(dir string) (remotes []string, e
err = b.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket([]byte(tempBucket))
if bucket == nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
c := bucket.Cursor()
@@ -899,22 +898,22 @@ func (b *Persistent) rollbackPendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
tempObj.Started = false
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
})
@@ -927,7 +926,7 @@ func (b *Persistent) removePendingUpload(remote string) error {
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
return bucket.Delete([]byte(remote))
})
@@ -942,17 +941,17 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
return b.db.Update(func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
if err != nil {
return errors.Errorf("couldn't bucket for %v", tempBucket)
return fmt.Errorf("couldn't bucket for %v", tempBucket)
}
var tempObj = &tempUploadInfo{}
v := bucket.Get([]byte(remote))
err = json.Unmarshal(v, tempObj)
if err != nil {
return errors.Errorf("pending upload (%v) not found %v", remote, err)
return fmt.Errorf("pending upload (%v) not found %v", remote, err)
}
if tempObj.Started {
return errors.Errorf("pending upload already started %v", remote)
return fmt.Errorf("pending upload already started %v", remote)
}
err = fn(tempObj)
if err != nil {
@@ -970,11 +969,11 @@ func (b *Persistent) updatePendingUpload(remote string, fn func(item *tempUpload
}
v2, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
err = bucket.Put([]byte(tempObj.DestPath), v2)
if err != nil {
return errors.Errorf("pending upload not updated %v", err)
return fmt.Errorf("pending upload not updated %v", err)
}
return nil
@@ -1015,11 +1014,11 @@ func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) erro
// cache Object Info
encoded, err := json.Marshal(tempObj)
if err != nil {
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
return fmt.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
}
err = bucket.Put([]byte(destPath), encoded)
if err != nil {
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
return fmt.Errorf("couldn't cache object (%v) info: %v", destPath, err)
}
fs.Debugf(cacheFs, "reconciled temporary upload: %v", destPath)
}
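
Note that in storage_persistent.go the former errors.Errorf call sites keep %v for the underlying error (for example "couldn't marshal object (%v) info: %v"): the cause is flattened into the message text, not wrapped, and only the former errors.Wrap/Wrapf sites gained %w. A short sketch of the practical difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("json: unsupported type")

	// %v flattens the cause into text, as the converted errors.Errorf
	// call sites do; errors.Is cannot recover it.
	flat := fmt.Errorf("couldn't marshal object info: %v", cause)
	// %w keeps the cause on the chain, as the converted errors.Wrapf
	// call sites do.
	chained := fmt.Errorf("couldn't marshal object info: %w", cause)

	fmt.Println(errors.Is(flat, cause))    // false
	fmt.Println(errors.Is(chained, cause)) // true
}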