From fa4b299c3d624959b70ab9c3654e67d0d544dabd Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Wed, 3 Dec 2025 16:43:04 +0000 Subject: [PATCH] Add Drime backend - FIXME WIP Unit tests a long way from passing! --- backend/all/all.go | 1 + backend/drime/api/types.go | 369 +++++++++ backend/drime/drime.go | 1537 +++++++++++++++++++++++++++++++++++ backend/drime/drime_test.go | 17 + 4 files changed, 1924 insertions(+) create mode 100644 backend/drime/api/types.go create mode 100644 backend/drime/drime.go create mode 100644 backend/drime/drime_test.go diff --git a/backend/all/all.go b/backend/all/all.go index 8a3c08802..9a2150a48 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -16,6 +16,7 @@ import ( _ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/doi" + _ "github.com/rclone/rclone/backend/drime" _ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/fichier" diff --git a/backend/drime/api/types.go b/backend/drime/api/types.go new file mode 100644 index 000000000..e1dc5904c --- /dev/null +++ b/backend/drime/api/types.go @@ -0,0 +1,369 @@ +// Package api has type definitions for drime +// +// Converted from the API docs with help from https://mholt.github.io/json-to-go/ +package api + +import ( + "encoding/json" + "fmt" + "time" +) + +// Types of things in Item +const ( + ItemTypeFolder = "folder" +) + +type User struct { + Email string `json:"email"` + ID json.Number `json:"id"` + Avatar string `json:"avatar"` + ModelType string `json:"model_type"` + OwnsEntry bool `json:"owns_entry"` + EntryPermissions []any `json:"entry_permissions"` + DisplayName string `json:"display_name"` +} + +type Permissions struct { + FilesUpdate bool `json:"files.update"` + FilesCreate bool `json:"files.create"` + FilesDownload bool `json:"files.download"` + FilesDelete bool `json:"files.delete"` +} + +// Item describes a folder or a 
file as returned by /drive/file-entries +type Item struct { + ID json.Number `json:"id"` + Name string `json:"name"` + Description any `json:"description"` + FileName string `json:"file_name"` + Mime string `json:"mime"` + Color any `json:"color"` + Backup bool `json:"backup"` + Tracked int `json:"tracked"` + FileSize int64 `json:"file_size"` + UserID json.Number `json:"user_id"` + ParentID json.Number `json:"parent_id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DeletedAt any `json:"deleted_at"` + IsDeleted int `json:"is_deleted"` + Path string `json:"path"` + DiskPrefix any `json:"disk_prefix"` + Type string `json:"type"` + Extension any `json:"extension"` + FileHash any `json:"file_hash"` + Public bool `json:"public"` + Thumbnail bool `json:"thumbnail"` + MuxStatus any `json:"mux_status"` + ThumbnailURL any `json:"thumbnail_url"` + WorkspaceID int `json:"workspace_id"` + IsEncrypted int `json:"is_encrypted"` + Iv any `json:"iv"` + VaultID any `json:"vault_id"` + OwnerID int `json:"owner_id"` + Hash string `json:"hash"` + URL string `json:"url"` + Users []User `json:"users"` + Tags []any `json:"tags"` + Permissions Permissions `json:"permissions"` +} + +type Listing struct { + CurrentPage int `json:"current_page"` + Data []Item `json:"data"` + From int `json:"from"` + LastPage int `json:"last_page"` + NextPage int `json:"next_page"` + PerPage int `json:"per_page"` + PrevPage int `json:"prev_page"` + To int `json:"to"` + Total int `json:"total"` +} + +type UploadResponse struct { + Status string `json:"status"` + FileEntry Item `json:"fileEntry"` +} + +type CreateFolderRequest struct { + Name string `json:"name"` + ParentID json.Number `json:"parentId,omitempty"` +} + +type CreateFolderResponse struct { + Status string `json:"status"` + Folder Item `json:"fileEntry"` +} + +const ( + // 2017-05-03T07:26:10-07:00 + timeFormat = `"` + time.RFC3339 + `"` +) + +// Time represents date and time information for the +// drime 
API, by using RFC3339 +type Time time.Time + +// MarshalJSON turns a Time into JSON (in UTC) +func (t *Time) MarshalJSON() (out []byte, err error) { + timeString := (*time.Time)(t).Format(timeFormat) + return []byte(timeString), nil +} + +// UnmarshalJSON turns JSON into a Time +func (t *Time) UnmarshalJSON(data []byte) error { + newT, err := time.Parse(timeFormat, string(data)) + if err != nil { + return err + } + *t = Time(newT) + return nil +} + +// Error is returned from drime when things go wrong +type Error struct { + Status string `json:"status"` +} + +// Error returns a string for the error and satisfies the error interface +func (e Error) Error() string { + out := fmt.Sprintf("Error %q", e.Status) + return out +} + +// IsError returns true if there is an error +func (e Error) IsError() bool { + return e.Status != "ok" +} + +// Err returns err if not nil, or e if IsError or nil +func (e Error) Err(err error) error { + if err != nil { + return err + } + if e.IsError() { + return e + } + return nil +} + +// Check Error satisfies the error interface +var _ error = (*Error)(nil) + +// Item describes a folder or a file as returned by /contents +type XXXItem struct { + ID string `json:"id"` + ParentFolder string `json:"parentFolder"` + Type string `json:"type"` + Name string `json:"name"` + Size int64 `json:"size"` + Code string `json:"code"` + CreateTime int64 `json:"createTime"` + ModTime int64 `json:"modTime"` + Link string `json:"link"` + MD5 string `json:"md5"` + MimeType string `json:"mimetype"` + ChildrenCount int `json:"childrenCount"` + DirectLinks map[string]*DirectLink `json:"directLinks"` + //Public bool `json:"public"` + //ServerSelected string `json:"serverSelected"` + //Thumbnail string `json:"thumbnail"` + //DownloadCount int `json:"downloadCount"` + //TotalDownloadCount int64 `json:"totalDownloadCount"` + //TotalSize int64 `json:"totalSize"` + //ChildrenIDs []string `json:"childrenIds"` + Children map[string]*Item `json:"children"` +} + +// 
ToNativeTime converts a go time to a native time +func ToNativeTime(t time.Time) int64 { + return t.Unix() +} + +// FromNativeTime converts native time to a go time +func FromNativeTime(t int64) time.Time { + return time.Unix(t, 0) +} + +// DirectLink describes a direct link to a file so it can be +// downloaded by third parties. +type DirectLink struct { + ExpireTime int64 `json:"expireTime"` + SourceIpsAllowed []any `json:"sourceIpsAllowed"` + DomainsAllowed []any `json:"domainsAllowed"` + Auth []any `json:"auth"` + IsReqLink bool `json:"isReqLink"` + DirectLink string `json:"directLink"` +} + +// Contents is returned from the /contents call +type Contents struct { + Error + Data struct { + Item + } `json:"data"` + Metadata Metadata `json:"metadata"` +} + +// Metadata is returned when paging is in use +type Metadata struct { + TotalCount int `json:"totalCount"` + TotalPages int `json:"totalPages"` + Page int `json:"page"` + PageSize int `json:"pageSize"` + HasNextPage bool `json:"hasNextPage"` +} + +// AccountsGetID is the result of /accounts/getid +type AccountsGetID struct { + Error + Data struct { + ID string `json:"id"` + } `json:"data"` +} + +// Stats of storage and traffic +type Stats struct { + FolderCount int64 `json:"folderCount"` + FileCount int64 `json:"fileCount"` + Storage int64 `json:"storage"` + TrafficDirectGenerated int64 `json:"trafficDirectGenerated"` + TrafficReqDownloaded int64 `json:"trafficReqDownloaded"` + TrafficWebDownloaded int64 `json:"trafficWebDownloaded"` +} + +// AccountsGet is the result of /accounts/{id} +type AccountsGet struct { + Error + Data struct { + ID string `json:"id"` + Email string `json:"email"` + Tier string `json:"tier"` + PremiumType string `json:"premiumType"` + Token string `json:"token"` + RootFolder string `json:"rootFolder"` + SubscriptionProvider string `json:"subscriptionProvider"` + SubscriptionEndDate int `json:"subscriptionEndDate"` + SubscriptionLimitDirectTraffic int64 
`json:"subscriptionLimitDirectTraffic"` + SubscriptionLimitStorage int64 `json:"subscriptionLimitStorage"` + StatsCurrent Stats `json:"statsCurrent"` + // StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"` + } `json:"data"` +} + +// CreateFolderRequest is the input to /contents/createFolder +// type CreateFolderRequest struct { +// ParentFolderID string `json:"parentFolderId"` +// FolderName string `json:"folderName"` +// ModTime int64 `json:"modTime,omitempty"` +// } + +// CreateFolderResponse is the output from /contents/createFolder +// type CreateFolderResponse struct { +// Error +// Data Item `json:"data"` +// } + +// DeleteRequest is the input to DELETE /contents +type DeleteRequest struct { + ContentsID string `json:"contentsId"` // comma separated list of IDs +} + +// DeleteResponse is the input to DELETE /contents +type DeleteResponse struct { + Error + Data map[string]Error +} + +// DirectUploadURL returns the direct upload URL for Drime +func DirectUploadURL() string { + return "https://upload.drime.io/uploadfile" +} + +// UploadResponse is returned by POST /contents/uploadfile +// type UploadResponse struct { +// Error +// Data Item `json:"data"` +// } + +// DirectLinksRequest specifies the parameters for the direct link +type DirectLinksRequest struct { + ExpireTime int64 `json:"expireTime,omitempty"` + SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"` + DomainsAllowed []any `json:"domainsAllowed,omitempty"` + Auth []any `json:"auth,omitempty"` +} + +// DirectLinksResult is returned from POST /contents/{id}/directlinks +type DirectLinksResult struct { + Error + Data struct { + ExpireTime int64 `json:"expireTime"` + SourceIpsAllowed []any `json:"sourceIpsAllowed"` + DomainsAllowed []any `json:"domainsAllowed"` + Auth []any `json:"auth"` + IsReqLink bool `json:"isReqLink"` + ID string `json:"id"` + DirectLink string `json:"directLink"` + } `json:"data"` +} + +// UpdateItemRequest describes the updates to be done to an item for PUT 
/contents/{id}/update +// +// The Value of the attribute to define : +// For Attribute "name" : The name of the content (file or folder) +// For Attribute "description" : The description displayed on the download page (folder only) +// For Attribute "tags" : A comma-separated list of tags (folder only) +// For Attribute "public" : either true or false (folder only) +// For Attribute "expiry" : A unix timestamp of the expiration date (folder only) +// For Attribute "password" : The password to set (folder only) +type UpdateItemRequest struct { + Attribute string `json:"attribute"` + Value any `json:"attributeValue"` +} + +// UpdateItemResponse is returned by PUT /contents/{id}/update +type UpdateItemResponse struct { + Error + Data Item `json:"data"` +} + +// MoveRequest is the input to /contents/move +type MoveRequest struct { + FolderID string `json:"folderId"` + ContentsID string `json:"contentsId"` // comma separated list of IDs +} + +// MoveResponse is returned by POST /contents/move +type MoveResponse struct { + Error + Data map[string]struct { + Error + Item `json:"data"` + } `json:"data"` +} + +// CopyRequest is the input to /contents/copy +type CopyRequest struct { + FolderID string `json:"folderId"` + ContentsID string `json:"contentsId"` // comma separated list of IDs +} + +// CopyResponse is returned by POST /contents/copy +type CopyResponse struct { + Error + Data map[string]struct { + Error + Item `json:"data"` + } `json:"data"` +} + +// UploadServerStatus is returned when fetching the root of an upload server +type UploadServerStatus struct { + Error + Data struct { + Server string `json:"server"` + Test string `json:"test"` + } `json:"data"` +} diff --git a/backend/drime/drime.go b/backend/drime/drime.go new file mode 100644 index 000000000..bf7235f69 --- /dev/null +++ b/backend/drime/drime.go @@ -0,0 +1,1537 @@ +// Package drime provides an interface to the Drime +// object storage system. 
+package drime + +/* +Return results give + + X-Ratelimit-Limit: 2000 + X-Ratelimit-Remaining: 1999 + +The rate limit headers indicate the number of allowed API requests per +minute. The limit is two thousand requests per minute, and rclone +should stay under that. +*/ + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/rclone/rclone/backend/drime/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" +) + +const ( + minSleep = 10 * time.Millisecond + maxSleep = 20 * time.Second + decayConstant = 1 // bigger for slower decay, exponential + baseURL = "https://app.drime.cloud/" + rootURL = baseURL + "api/v1" + rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error + maxDepth = 4 // in ListR recursive list this deep (maximum is 16) +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "drime", + Description: "Drime", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "access_token", + Help: `API Access token + +You can get this from the web control panel.`, + Sensitive: true, + }, { + Name: "root_folder_id", + Help: `ID of the root folder + +Leave this blank normally, rclone will fill it in automatically. + +If you want rclone to be restricted to a particular folder you can +fill it in - see the docs for more info. +`, + Default: "", + Advanced: true, + Sensitive: true, + }, { + Name: "workspace_id", + Help: `Account ID + +Leave this blank normally, rclone will fill it in automatically. 
+`, + Default: "", + Advanced: true, + Sensitive: true, + }, { + Name: "list_chunk", + Help: `Number of items to list in each call`, + Default: 1000, + Advanced: true, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: (encoder.Display | // Slash Control Delete Dot + encoder.EncodeDoubleQuote | + encoder.EncodeAsterisk | + encoder.EncodeColon | + encoder.EncodeLtGt | + encoder.EncodeQuestion | + encoder.EncodeBackSlash | + encoder.EncodePipe | + encoder.EncodeExclamation | + encoder.EncodeLeftPeriod | + encoder.EncodeRightPeriod | + encoder.EncodeInvalidUtf8), + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + AccessToken string `config:"access_token"` + RootFolderID string `config:"root_folder_id"` + WorkspaceID string `config:"workspace_id"` + ListChunk int `config:"list_chunk"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a remote drime +type Fs struct { + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // the connection to the server + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *fs.Pacer // pacer for API calls +} + +// Object describes a drime object +// +// The full set of metadata will always be present +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + size int64 // size of the object + modTime time.Time // modification time of the object + id string // ID of the object + dirID string // ID of the object's directory + mimeType string // mime type of the object + url string // where to download this object +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) 
Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("drime root '%s'", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// parsePath parses a drime 'url' +func parsePath(path string) (root string) { + root = strings.Trim(path, "/") + return +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// Return true if the api error has the status given +func isAPIErr(err error, status string) bool { + var apiErr api.Error + if errors.As(err, &apiErr) { + return apiErr.Status == status + } + return false +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + if isAPIErr(err, "error-rateLimit") { + // Give an immediate penalty to rate limits + fs.Debugf(nil, "Rate limited, sleep for %v", rateLimitSleep) + time.Sleep(rateLimitSleep) + //return true, pacer.RetryAfterError(err, 2*time.Second) + return true, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// readMetaDataForPath reads the metadata from the path +func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) { + // defer log.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err) + leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + + found, err := f.listAll(ctx, directoryID, false, true, 
leaf, func(item *api.Item) bool { + if item.Name == leaf { + info = item + return true + } + return false + }) + if err != nil { + return nil, err + } + if !found { + return nil, fs.ErrorObjectNotFound + } + return info, nil +} + +// readMetaDataForID reads the metadata for the ID given +func (f *Fs) readMetaDataForID(ctx context.Context, id string) (info *api.Item, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/contents/" + id, + Parameters: url.Values{ + "page": {"1"}, + "pageSize": {"1"}, // not interested in children so just ask for 1 + }, + } + var result api.Contents + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &result) + // Retry not found errors - when looking for an ID it should really exist + if isAPIErr(err, "error-notFound") { + return true, err + } + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to get item info: %w", err) + } + return &result.Data.Item, nil +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + if err != nil { + fs.Debugf(nil, "Couldn't read error out of body: %v", err) + body = nil + } + // Decode error response if there was one - they can be blank + var errResponse api.Error + if len(body) > 0 { + err = json.Unmarshal(body, &errResponse) + if err != nil { + fs.Debugf(nil, "Couldn't decode error response: %v", err) + } + } + if errResponse.Status == "" { + errResponse.Status = fmt.Sprintf("%s (%d): %s", resp.Status, resp.StatusCode, string(body)) + } + return errResponse +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + root = parsePath(root) + + client := fshttp.NewClient(ctx) + + f := &Fs{ + name: 
name, + root: root, + opt: *opt, + srv: rest.NewClient(client).SetRoot(rootURL), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + // FIXME + // CaseInsensitive: false, + // CanHaveEmptyDirectories: true, + // DuplicateFiles: true, + // ReadMimeType: true, + // WriteMimeType: false, + // WriteDirSetModTime: true, + // DirModTimeUpdatesOnWrite: true, + }).Fill(ctx, f) + f.srv.SetErrorHandler(errorHandler) + f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) + f.srv.SetHeader("Accept", "application/json") + + // Read account ID if not present + // err = f.readWorkspaceID(ctx, m) + // if err != nil { + // return nil, err + // } + + // Read Root Folder ID if not present + // err = f.readRootFolderID(ctx, m) + // if err != nil { + // return nil, err + // } + + // Get rootFolderID + rootID := f.opt.RootFolderID + f.dirCache = dircache.New(root, rootID, f) + + // Find the current root + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, rootID, &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + _, err := tempF.newObjectWithInfo(ctx, remote, nil) + if err != nil { + if err == fs.ErrorObjectNotFound { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(ctx, &tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. 
+ // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// Read the WorkspaceID into f.opt if not set and cache in the config file as workspace_id +// func (f *Fs) readWorkspaceID(ctx context.Context, m configmap.Mapper) (err error) { +// if f.opt.WorkspaceID != "" { +// return nil +// } +// opts := rest.Opts{ +// Method: "GET", +// Path: "/accounts/getid", +// } +// var result api.AccountsGetID +// var resp *http.Response +// err = f.pacer.Call(func() (bool, error) { +// resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) +// return shouldRetry(ctx, resp, err) +// }) +// if err != nil { +// return fmt.Errorf("failed to read account ID: %w", err) +// } +// f.opt.WorkspaceID = result.Data.ID +// m.Set("workspace_id", f.opt.WorkspaceID) +// return nil +// } + +// Read the Accounts info +// func (f *Fs) getAccounts(ctx context.Context) (result *api.AccountsGet, err error) { +// opts := rest.Opts{ +// Method: "GET", +// Path: "/accounts/" + f.opt.WorkspaceID, +// } +// result = new(api.AccountsGet) +// var resp *http.Response +// err = f.pacer.Call(func() (bool, error) { +// resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) +// return shouldRetry(ctx, resp, err) +// }) +// if err != nil { +// return nil, fmt.Errorf("failed to read accountd info: %w", err) +// } +// return result, nil +// } + +// Read the RootFolderID into f.opt if not set and cache in the config file as root_folder_id +// func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err error) { +// if f.opt.RootFolderID != "" { +// return nil +// } +// result, err := f.getAccounts(ctx) +// if err != nil { +// return err +// } +// f.opt.RootFolderID = result.Data.RootFolder +// m.Set("root_folder_id", f.opt.RootFolderID) +// return nil +// } + +// rootSlash returns root with a slash on if it is empty, otherwise empty string +func 
(f *Fs) rootSlash() string { + if f.root == "" { + return f.root + } + return f.root + "/" +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + var err error + if info != nil { + // Set info + err = o.setMetaData(info) + } else { + err = o.readMetaData(ctx) // reads info and meta, returning an error + } + if err != nil { + return nil, err + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + // Find the leaf in pathID + found, err = f.listAll(ctx, pathID, true, false, leaf, func(item *api.Item) bool { + if item.Name == leaf { + pathIDOut = item.ID.String() + return true + } + return false + }) + return pathIDOut, found, err +} + +// createDir makes a directory with pathID as parent and name leaf and modTime +func (f *Fs) createDir(ctx context.Context, pathID, leaf string, modTime time.Time) (item *api.Item, err error) { + var resp *http.Response + var result api.CreateFolderResponse + opts := rest.Opts{ + Method: "POST", + Path: "/folders", + } + mkdir := api.CreateFolderRequest{ + Name: f.opt.Enc.FromStandardName(leaf), + ParentID: json.Number(pathID), + } + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to create folder: %w", err) + } + return &result.Folder, nil +} + +// CreateDir makes a directory with pathID as 
parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf) + item, err := f.createDir(ctx, pathID, leaf, time.Now()) + if err != nil { + return "", err + } + return item.ID.String(), nil +} + +// list the objects into the function supplied +// +// If directories is set it only sends directories +// User function to process a File item from listAll +// +// Should return true to finish processing +type listAllFn func(*api.Item) bool + +// Lists the directory required calling the user function on each item found +// +// If name is set then the server will limit the returned items to those +// with that name. +// +// If the user fn ever returns true then it early exits with found = true +func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, name string, fn listAllFn) (found bool, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/drive/file-entries", + Parameters: url.Values{}, + } + if dirID != "" { + opts.Parameters.Add("parentIds", dirID) + } + if name != "" { + // FIXME causes 500 server error + //opts.Parameters.Add("query", f.opt.Enc.FromStandardName(name)) + } + if directoriesOnly { + opts.Parameters.Add("type", api.ItemTypeFolder) + } + opts.Parameters.Set("perPage", strconv.Itoa(f.opt.ListChunk)) + page := 1 +OUTER: + for { + opts.Parameters.Set("page", strconv.Itoa(page)) + var result api.Listing + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return found, fmt.Errorf("couldn't list files: %w", err) + } + for _, item := range result.Data { + if item.Type == api.ItemTypeFolder { + if filesOnly { + continue + } + } else { + if directoriesOnly { + continue + } + } + item.Name = f.opt.Enc.ToStandardName(item.Name) + if fn(&item) { + found = true + break OUTER + } + } + 
if result.NextPage == 0 { + break + } + page = result.NextPage + } + return found, err +} + +// Convert a list item into a DirEntry +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, info *api.Item) (entry fs.DirEntry, err error) { + if info.Type == api.ItemTypeFolder { + // cache the directory ID for later lookups + f.dirCache.Put(remote, info.ID.String()) + entry = fs.NewDir(remote, info.UpdatedAt). + SetSize(info.FileSize). + SetID(info.ID.String()). + SetParentID(info.ParentID.String()) + } else { + entry, err = f.newObjectWithInfo(ctx, remote, info) + if err != nil { + return nil, err + } + } + return entry, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + var iErr error + _, err = f.listAll(ctx, directoryID, false, false, "", func(info *api.Item) bool { + remote := path.Join(dir, info.Name) + entry, err := f.itemToDirEntry(ctx, remote, info) + if err != nil { + iErr = err + return true + } + entries = append(entries, entry) + return false + }) + if err != nil { + return nil, err + } + if iErr != nil { + return nil, iErr + } + return entries, nil +} + +// implementation of ListR +// func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) { +// directoryID, err := f.dirCache.FindDir(ctx, dir, false) +// if err != nil { +// return err +// } +// opts := rest.Opts{ +// Method: "GET", +// Path: "/contents/" + directoryID, +// Parameters: url.Values{"maxdepth": {strconv.Itoa(maxDepth)}}, +// } +// page := 1 +// for { +// opts.Parameters.Set("page", strconv.Itoa(page)) +// 
opts.Parameters.Set("pageSize", strconv.Itoa(f.opt.ListChunk)) +// var result api.Contents +// var resp *http.Response +// err = f.pacer.Call(func() (bool, error) { +// resp, err = f.srv.CallJSON(ctx, &opts, nil, &result) +// return shouldRetry(ctx, resp, err) +// }) +// if err != nil { +// if isAPIErr(err, "error-notFound") { +// return fs.ErrorDirNotFound +// } +// return fmt.Errorf("couldn't recursively list files: %w", err) +// } +// // Result.Data.Item now contains a recursive listing so we will have to decode recursively +// var decode func(string, *api.Item) error +// decode = func(dir string, dirItem *api.Item) error { +// // If we have ChildrenCount but no Children this means the recursion stopped here +// if dirItem.ChildrenCount > 0 && len(dirItem.Children) == 0 { +// return f.listR(ctx, dir, list) +// } +// for _, item := range dirItem.Children { +// if item.Type != api.ItemTypeFolder && item.Type != api.ItemTypeFile { +// fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type) +// continue +// } +// item.Name = f.opt.Enc.ToStandardName(item.Name) +// remote := path.Join(dir, item.Name) +// entry, err := f.itemToDirEntry(ctx, remote, item) +// if err != nil { +// return err +// } +// err = list.Add(entry) +// if err != nil { +// return err +// } +// if item.Type == api.ItemTypeFolder { +// err := decode(remote, item) +// if err != nil { +// return err +// } +// } +// } +// return nil +// } +// err = decode(dir, &result.Data.Item) +// if err != nil { +// return err +// } +// if !result.Metadata.HasNextPage { +// break +// } +// page += 1 +// } +// return err +// } + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. +// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. 
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+//
+// Don't implement this unless you have a more efficient way
+// of listing recursively than doing a directory traversal.
+// func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+// 	list := list.NewHelper(callback)
+// 	err = f.listR(ctx, dir, list)
+// 	if err != nil {
+// 		return err
+// 	}
+// 	return list.Flush()
+// }
+
+// Creates from the parameters passed in a half finished Object which
+// must have setMetaData called on it
+//
+// Returns the object, leaf, directoryID and error.
+//
+// Used to create new objects
+//
+// NOTE(review): modTime and size are accepted but not used in the body
+// below (WIP) — confirm whether they should be stored on the Object.
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+	// Create the directory for the object if it doesn't exist
+	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return
+	}
+	// Temporary Object under construction
+	o = &Object{
+		fs:     f,
+		remote: remote,
+	}
+	return o, leaf, directoryID, nil
+}
+
+// Put the object
+//
+// Copy the reader in to the new object which is returned.
+//
+// The new object may have been created if an error is returned
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	// NOTE(review): switch compares sentinel errors directly — relies on
+	// NewObject returning fs.ErrorObjectNotFound unwrapped; confirm.
+	switch err {
+	case nil:
+		// Object exists so update it in place
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		// Not found so create it
+		return f.PutUnchecked(ctx, in, src, options...)
+	default:
+		return nil, err
+	}
+}
+
+// PutStream uploads to the remote path with the modTime given of indeterminate size
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
+}
+
+// PutUnchecked the object into the container
+//
+// This will produce a duplicate if the object already exists.
+//
+// Copy the reader in to the new object which is returned.
+//
+// The new object may have been created if an error is returned
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	remote := src.Remote()
+	size := src.Size()
+	modTime := src.ModTime(ctx)
+
+	o, _, _, err := f.createObject(ctx, remote, modTime, size)
+	if err != nil {
+		return nil, err
+	}
+	return o, o.Update(ctx, in, src, options...)
+}
+
+// Mkdir creates the container if it doesn't exist
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	_, err := f.dirCache.FindDir(ctx, dir, true)
+	return err
+}
+
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+	dirID, err := f.dirCache.FindDir(ctx, dir, false)
+	if err != nil {
+		return err
+	}
+	_, err = f.setModTime(ctx, dirID, modTime)
+	return err
+}
+
+// deleteObject removes an object by ID
+//
+// The ID to delete is carried in the request body, not the path.
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
+	opts := rest.Opts{
+		Method: "DELETE",
+		Path:   "/contents/",
+	}
+	request := api.DeleteRequest{
+		ContentsID: id,
+	}
+	var result api.DeleteResponse
+	err := f.pacer.Call(func() (bool, error) {
+		resp, err := f.srv.CallJSON(ctx, &opts, &request, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return fmt.Errorf("failed to delete item: %w", err)
+	}
+	// Check the individual result codes also
+	for _, err := range result.Data {
+		if err.IsError() {
+			return fmt.Errorf("failed to delete item: %w", err)
+		}
+	}
+	return nil
+}
+
+// purgeCheck removes the root directory, if check is set then it
+// refuses to do so if it has anything in
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+	root := path.Join(f.root, dir)
+	if root == "" {
+		return errors.New("can't purge root directory")
+	}
+	dc := f.dirCache
+	rootID, err := dc.FindDir(ctx, dir, false)
+	if err != nil {
+		return err
+	}
+
+	// Check to see if there is contents in the directory
+	// (returning true from the callback presumably stops the listing at
+	// the first entry — confirm listAll semantics)
+	if check {
+		found, err := f.listAll(ctx, rootID, false, false, "", func(item *api.Item) bool {
+			return true
+		})
+		if err != nil {
+			return err
+		}
+		if found {
+			return fs.ErrorDirectoryNotEmpty
+		}
+	}
+
+	// Delete the directory
+	err = f.deleteObject(ctx, rootID)
+	if err != nil {
+		return err
+	}
+
+	f.dirCache.FlushDir(dir)
+	return nil
+}
+
+// Rmdir deletes the root folder
+//
+// Returns an error if it isn't empty
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
+}
+
+// Precision return the precision of this Fs
+func (f *Fs) Precision() time.Duration {
+	return time.Second
+}
+
+// Purge deletes all the files and the container
+//
+// Optional interface: Only implement this if you have a way of
+// deleting all the files quicker than just running Remove() on the
+// result of List()
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, false)
+}
+
+// About gets quota information
+//
+// NOTE(review): currently returns a nil usage with no error until the
+// accounts call below is implemented.
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+	// result, err := f.getAccounts(ctx)
+	// if err != nil {
+	// 	return nil, err
+	// }
+	// used := result.Data.StatsCurrent.Storage
+	// files := result.Data.StatsCurrent.FileCount
+	// total := result.Data.SubscriptionLimitStorage
+	// usage = &fs.Usage{
+	// 	Used:    fs.NewUsageValue(used),         // bytes in use
+	// 	Total:   fs.NewUsageValue(total),        // bytes total
+	// 	Free:    fs.NewUsageValue(total - used), // bytes free
+	// 	Objects: fs.NewUsageValue(files),        // total objects
+	// }
+	// FIXME
+	return usage, nil
+}
+
+// patch an attribute on an object to value
+func (f *Fs) patch(ctx context.Context, id, attribute string, value any) (item *api.Item, err error) {
+	var resp *http.Response
+	var request = api.UpdateItemRequest{
+		Attribute: attribute,
+		Value:     value,
+	}
+	var result api.UpdateItemResponse
+	opts := rest.Opts{
+		Method: "PUT",
+		Path:   "/contents/" + id + "/update",
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to patch item %q to %v: %w", attribute, value, err)
+	}
+	return &result.Data, nil
+}
+
+// rename a file or a folder
+func (f *Fs) rename(ctx context.Context, id, newLeaf string) (item *api.Item, err error) {
+	return f.patch(ctx, id, "name", f.opt.Enc.FromStandardName(newLeaf))
+}
+
+// setModTime sets the modification time of a file or folder
+func (f *Fs) setModTime(ctx context.Context, id string, modTime time.Time) (item *api.Item, err error) {
+	return f.patch(ctx, id, "modTime", api.ToNativeTime(modTime))
+}
+
+// move a file or a folder to a new directory
+func (f *Fs) move(ctx context.Context, id, newDirID string) (item *api.Item, err error) {
+	var resp *http.Response
+	var request = api.MoveRequest{
+		FolderID:   newDirID,
+		ContentsID: id,
+	}
+	var result api.MoveResponse
+	opts := rest.Opts{
+		Method: "PUT",
+		Path:   "/contents/move",
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to move item: %w", err)
+	}
+	// The result is keyed by the ID we moved
+	itemResult, ok := result.Data[id]
+	if !ok || itemResult.Item.ID == "" {
+		return nil, errors.New("failed to read result of move")
+	}
+	return &itemResult.Item, nil
+}
+
+// move and rename a file or folder to directoryID with leaf
+func (f *Fs) moveTo(ctx context.Context, id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID string) (info *api.Item, err error) {
+	// Can have duplicates so don't have to be careful here
+
+	// Rename if required
+	if srcLeaf != dstLeaf {
+		info, err = f.rename(ctx, id, dstLeaf)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Move if required
+	if srcDirectoryID != dstDirectoryID {
+		info, err = f.move(ctx, id, dstDirectoryID)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Neither a rename nor a move happened, so fetch the metadata
+	if info == nil {
+		return f.readMetaDataForID(ctx, id)
+	}
+	return info, nil
+}
+
+// Move src to this remote using server-side move operations.
+//
+// This is stored with the remote path given.
+//
+// It returns the destination Object and a possible error.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+
+	// Find existing object
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create temporary object
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	if err != nil {
+		return nil, err
+	}
+
+	// Do the move
+	info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
+	if err != nil {
+		return nil, err
+	}
+
+	err = dstObj.setMetaData(info)
+	if err != nil {
+		return nil, err
+	}
+	return dstObj, nil
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+
+	srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+
+	// Do the move
+	_, err = f.moveTo(ctx, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
+	if err != nil {
+		return err
+	}
+	srcFs.dirCache.FlushDir(srcRemote)
+	return nil
+}
+
+// copy a file or a folder to a new directory
+func (f *Fs) copy(ctx context.Context, id, newDirID string) (item *api.Item, err error) {
+	var resp *http.Response
+	var request = api.CopyRequest{
+		FolderID:   newDirID,
+		ContentsID: id,
+	}
+	var result api.CopyResponse
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/contents/copy",
+	}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to copy item: %w", err)
+	}
+	// The result is keyed by the ID we copied
+	itemResult, ok := result.Data[id]
+	if !ok || itemResult.Item.ID == "" {
+		return nil, errors.New("failed to read result of copy")
+	}
+	return &itemResult.Item, nil
+}
+
+// copy and rename a file or folder to directoryID with leaf
+func (f *Fs) copyTo(ctx context.Context, srcID, srcLeaf, dstLeaf, dstDirectoryID string) (info *api.Item, err error) {
+	// Can have duplicates so don't have to be careful here
+
+	// Copy to dstDirectoryID first
+	info, err = f.copy(ctx, srcID, dstDirectoryID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Rename if required
+	if srcLeaf != dstLeaf {
+		info, err = f.rename(ctx, info.ID.String(), dstLeaf)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return info, nil
+}
+
+// Copy src to this remote using server-side copy operations.
+//
+// This is stored with the remote path given.
+//
+// It returns the destination Object and a possible error.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't copy - not same remote type")
+		return nil, fs.ErrorCantCopy
+	}
+	srcLeaf := path.Base(srcObj.remote)
+
+	srcPath := srcObj.fs.rootSlash() + srcObj.remote
+	dstPath := f.rootSlash() + remote
+	if srcPath == dstPath {
+		return nil, fmt.Errorf("can't copy %q -> %q as are same name", srcPath, dstPath)
+	}
+
+	// Find existing object
+	existingObj, err := f.NewObject(ctx, remote)
+	if err == nil {
+		// The deferred function assigns to the named return err so a
+		// failed Remove is reported to the caller
+		defer func() {
+			// Don't remove existing object if returning an error
+			if err != nil {
+				return
+			}
+			fs.Debugf(existingObj, "Server side copy: removing existing object after successful copy")
+			err = existingObj.Remove(ctx)
+		}()
+	}
+
+	// Create temporary object
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
+	if err != nil {
+		return nil, err
+	}
+
+	// Copy the object
+	info, err := f.copyTo(ctx, srcObj.id, srcLeaf, dstLeaf, dstDirectoryID)
+	if err != nil {
+		return nil, err
+	}
+	err = dstObj.setMetaData(info)
+	if err != nil {
+		return nil, err
+	}
+
+	// Reset the modification time as copy does not preserve it
+	err = dstObj.SetModTime(ctx, srcObj.modTime)
+	if err != nil {
+		return nil, err
+	}
+
+	return dstObj, nil
+}
+
+// unLink a file or directory
+// func (f *Fs) unLink(ctx context.Context, remote string, id string, info *api.Item) (err error) {
+// 	if info == nil {
+// 		info, err = f.readMetaDataForID(ctx, id)
+// 		if err != nil {
+// 			return err
+// 		}
+// 	}
+// 	for linkID, link := range info.DirectLinks {
+// 		fs.Debugf(remote, "Removing direct link %s", link.DirectLink)
+// 		opts := rest.Opts{
+// 			Method: "DELETE",
+// 			Path:   "/contents/" + id + "/directlinks/" + linkID,
+// 		}
+// 		var result api.Error
+// 		err := f.pacer.Call(func() (bool, error) {
+// 			resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
+// 			return shouldRetry(ctx, resp, err)
+// 		})
+// 		if err != nil {
+// 			return fmt.Errorf("failed to unlink: %s", link.DirectLink)
+// 		}
+// 	}
+// 	return nil
+// }
+
+// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
+// func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
+// 	id, err := f.dirCache.FindDir(ctx, remote, false)
+// 	var info *api.Item
+// 	if err == nil {
+// 		fs.Debugf(f, "attempting to share directory '%s'", remote)
+// 	} else {
+// 		fs.Debugf(f, "attempting to share single file '%s'", remote)
+// 		info, err = f.readMetaDataForPath(ctx, remote)
+// 		if err != nil {
+// 			return "", err
+// 		}
+// 		id = info.ID.String()
+// 	}
+// 	if unlink {
+// 		return "", f.unLink(ctx, remote, id, info)
+// 	}
+// 	var resp *http.Response
+// 	var request api.DirectLinksRequest
+// 	var result api.DirectLinksResult
+// 	opts := rest.Opts{
+// 		Method: "POST",
+// 		Path:   "/contents/" + id + "/directlinks",
+// 	}
+// 	if expire != fs.DurationOff {
+// 		when := time.Now().Add(time.Duration(expire))
+// 		fs.Debugf(f, "Link expires at %v (duration %v)", when, expire)
+// 		request.ExpireTime = api.ToNativeTime(when)
+// 	}
+// 	err = f.pacer.Call(func() (bool, error) {
+// 		resp, err = f.srv.CallJSON(ctx, &opts, &request, &result)
+// 		return shouldRetry(ctx, resp, err)
+// 	})
+// 	if err != nil {
+// 		return "", fmt.Errorf("failed to create direct link: %w", err)
+// 	}
+// 	return result.Data.DirectLink, err
+// }
+
+// DirCacheFlush resets the directory cache - used in testing as an
+// optional interface
+func (f *Fs) DirCacheFlush() {
+	f.dirCache.ResetRoot()
+}
+
+// Hashes returns the supported hash sets - none are supported here.
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.None)
+}
+
+// MergeDirs merges the contents of all the directories passed
+// in into the first one and rmdirs the other directories.
+// func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
+// 	if len(dirs) < 2 {
+// 		return nil
+// 	}
+// 	dstDir := dirs[0]
+// 	for _, srcDir := range dirs[1:] {
+// 		// list the objects
+// 		infos := []*api.Item{}
+// 		_, err := f.listAll(ctx, srcDir.ID(), false, false, "", func(info *api.Item) bool {
+// 			infos = append(infos, info)
+// 			return false
+// 		})
+// 		if err != nil {
+// 			return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
+// 		}
+// 		// move them into place
+// 		for _, info := range infos {
+// 			fs.Infof(srcDir, "merging %q", info.Name)
+// 			// Move the file into the destination
+// 			_, err = f.move(ctx, info.ID.String(), dstDir.ID())
+// 			if err != nil {
+// 				return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
+// 			}
+// 		}
+// 		// rmdir the now empty source directory
+// 		fs.Infof(srcDir, "removing empty directory")
+// 		err = f.deleteObject(ctx, srcDir.ID())
+// 		if err != nil {
+// 			return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
+// 		}
+// 	}
+// 	return nil
+// }
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// Return a string version
+//
+// NOTE(review): rclone backends conventionally return "<nil>" for a nil
+// object - confirm that returning the empty string here is intended.
+func (o *Object) String() string {
+	if o == nil {
+		return ""
+	}
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// Hash returns an empty string and hash.ErrUnsupported as no hash types are supported
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	return "", hash.ErrUnsupported
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	return o.size
+}
+
+// setMetaDataAny sets the metadata from info but doesn't check the type
+func (o *Object) setMetaDataAny(info *api.Item) {
+	o.size = info.FileSize
+	o.modTime = info.UpdatedAt
+	o.id = info.ID.String()
+	o.dirID = info.ParentID.String()
+	o.mimeType = info.Mime
+	o.url = info.URL
+}
+
+// setMetaData sets the metadata from info
+func (o *Object) setMetaData(info *api.Item) (err error) {
+	if info.Type == api.ItemTypeFolder {
+		return fs.ErrorIsDir
+	}
+	if info.ID == "" {
+		// NOTE(review): fmt.Errorf with no verbs - errors.New would be more idiomatic
+		return fmt.Errorf("ID not found in response")
+	}
+	o.setMetaDataAny(info)
+	return nil
+}
+
+// readMetaData gets the metadata unconditionally as we expect Object
+// to always have the full set of metadata
+func (o *Object) readMetaData(ctx context.Context) (err error) {
+	var info *api.Item
+	if o.id != "" {
+		info, err = o.fs.readMetaDataForID(ctx, o.id)
+	} else {
+		info, err = o.fs.readMetaDataForPath(ctx, o.remote)
+	}
+	if err != nil {
+		if isAPIErr(err, "error-notFound") {
+			return fs.ErrorObjectNotFound
+		}
+		return err
+	}
+	return o.setMetaData(info)
+}
+
+// ModTime returns the modification time of the object
+//
+// This returns the cached value as set from the item metadata
+// (UpdatedAt) by setMetaData
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	return o.modTime
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+	info, err := o.fs.setModTime(ctx, o.id, modTime)
+	if err != nil {
+		return err
+	}
+	return o.setMetaData(info)
+}
+
+// Storable returns a boolean showing whether this object is storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.id == "" {
+		return nil, errors.New("can't download - no id")
+	}
+	if o.url == "" {
+		// On upload an Object is returned with no url, so fetch it here if needed
+		err = o.readMetaData(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("read metadata: %w", err)
+		}
+	}
+	fs.FixRangeOption(options, o.size)
+	var resp *http.Response
+	opts := rest.Opts{
+		Method:  "GET",
+		RootURL: baseURL + o.url,
+		Options: options,
+		// Workaround for bug in content servers - no longer needed
+		// ExtraHeaders: map[string]string{
+		// 	"Cookie": "accountToken=" + o.fs.opt.AccessToken,
+		// },
+	}
+
+	err = o.fs.pacer.Call(func() (bool, error) {
+		resp, err = o.fs.srv.Call(ctx, &opts)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+	// Caller is responsible for closing the body
+	return resp.Body, err
+}
+
+// Update the object with the contents of the io.Reader, modTime and size
+//
+// If existing is set then it updates the object rather than creating a new one.
+//
+// The new object may have been created if an error is returned.
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	remote := o.Remote()
+	//modTime := src.ModTime(ctx)
+
+	// Create the directory for the object if it doesn't exist
+	leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return err
+	}
+
+	// Do the upload
+	var resp *http.Response
+	var result api.UploadResponse
+	var encodedLeaf = o.fs.opt.Enc.FromStandardName(leaf)
+	opts := rest.Opts{
+		Method: "POST",
+		Body:   in,
+		MultipartParams: url.Values{
+			"parentId":     {directoryID},
+			"relativePath": {encodedLeaf},
+			// FIXME "modTime": {strconv.FormatInt(modTime.Unix(), 10)},
+		},
+		MultipartContentName: "file",
+		MultipartFileName:    encodedLeaf,
+		Path:                 "/uploads",
+		Options:              options,
+	}
+	// CallNoRetry as the body can't be rewound for a retry
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return fmt.Errorf("failed to upload file: %w", err)
+	}
+	return o.setMetaData(&result.FileEntry)
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	return o.fs.deleteObject(ctx, o.id)
+}
+
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+	return o.id
+}
+
+// MimeType returns the content type of the Object if known, or "" if not
+func (o *Object) MimeType(ctx context.Context) string {
+	return o.mimeType
+}
+
+// ParentID returns the ID of the Object parent if known, or "" if not
+func (o *Object) ParentID() string {
+	return o.dirID
+}
+
+// Check the interfaces are satisfied
+var (
+	_ fs.Fs              = (*Fs)(nil)
+	_ fs.Purger          = (*Fs)(nil)
+	_ fs.PutStreamer     = (*Fs)(nil)
+	_ fs.Copier          = (*Fs)(nil)
+	_ fs.Abouter         = (*Fs)(nil)
+	_ fs.Mover           = (*Fs)(nil)
+	_ fs.DirMover        = (*Fs)(nil)
+	_ fs.DirCacheFlusher = (*Fs)(nil)
+	//_ fs.PublicLinker = (*Fs)(nil)
+	//_ fs.MergeDirser = (*Fs)(nil)
+	_ fs.DirSetModTimer = (*Fs)(nil)
+	// _ fs.ListRer = (*Fs)(nil)
+	_ fs.Object    = (*Object)(nil)
+	_ fs.IDer      = (*Object)(nil)
+	_ fs.MimeTyper = (*Object)(nil)
+)
+
+/*
+Sample response from the list endpoint, kept for reference:
+{
+    "current_page": 1,
+    "data": [
+        {
+            "id": 483924217,
+            "name": "go.sum",
+            "description": null,
+            "file_name": "7adf30be-2fe2-4f20-87ba-fa4e9d51e482",
+            "mime": "application/octet-stream",
+            "color": null,
+            "backup": false,
+            "tracked": 0,
+            "file_size": 108408,
+            "user_id": null,
+            "parent_id": 483924168,
+            "created_at": "2025-12-02T18:36:00.000000Z",
+            "updated_at": "2025-12-03T09:03:26.000000Z",
+            "deleted_at": null,
+            "is_deleted": 0,
+            "path": "483924168/483924217",
+            "disk_prefix": "7adf30be-2fe2-4f20-87ba-fa4e9d51e482",
+            "type": "file",
+            "extension": "sum",
+            "file_hash": null,
+            "public": false,
+            "thumbnail": false,
+            "mux_status": null,
+            "thumbnail_url": null,
+            "workspace_id": 0,
+            "is_encrypted": 0,
+            "iv": null,
+            "vault_id": null,
+            "owner_id": 19984,
+            "hash": "NDgzOTI0MjE3fA",
+            "url": "api/v1/file-entries/483924217",
+            "users": [
+                {
+                    "email": "support@rclone.com",
+                    "id": 19984,
+                    "avatar": "https://www.gravatar.com/avatar/03a1d0a602fe31ddbaad590c21c988b8?s=&d=retro",
+                    "model_type": "user",
+                    "owns_entry": true,
+                    "entry_permissions": [],
+                    "display_name": "support"
+                }
+            ],
+            "tags": [],
+            "permissions": {
+                "files.update": true,
+                "files.create": true,
+                "files.download": true,
+                "files.delete": true
+            }
+        }
+    ],
+    "from": 1,
+    "last_page": 1,
+    "next_page": null,
+    "per_page": 1000,
+    "prev_page": null,
+    "to": 1,
+    "total": 1
+}
+*/
diff --git a/backend/drime/drime_test.go b/backend/drime/drime_test.go
new file mode 100644
index 000000000..10086d582
--- /dev/null
+++ b/backend/drime/drime_test.go
@@ -0,0 +1,17 @@
+// Test Drime filesystem interface
+package drime_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/drime"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestDrime:",
+		NilObject:  (*drime.Object)(nil),
+	})
+}