onedrive: add metadata support
This change adds support for metadata on OneDrive. Metadata (including permissions) is supported for both files and directories.

OneDrive supports System Metadata (not User Metadata, as of this writing). Much of the metadata is read-only, and there are some differences between OneDrive Personal and Business (see the table in the OneDrive backend docs for details).

Permissions are also supported, if --onedrive-metadata-permissions is set. The accepted values for --onedrive-metadata-permissions are read, write, read,write, and off (the default). write supports adding new permissions, updating the "role" of existing permissions, and removing permissions. Updating and removing require the Permission ID to be known, so it is recommended to use read,write instead of write if you wish to update/remove permissions.

Permissions are read/written in JSON format using the same schema as the OneDrive API, which differs slightly between OneDrive Personal and Business. (See the OneDrive backend docs for examples.)

To write permissions, pass in a "permissions" metadata key using this same format. The --metadata-mapper tool can be very helpful for this.

When adding permissions, an email address can be provided in the User.ID or DisplayName properties of grantedTo or grantedToIdentities. Alternatively, an ObjectID can be provided in User.ID. At least one valid recipient must be provided in order to add a permission for a user. Creating a Public Link is also supported, if Link.Scope is set to "anonymous". Note that adding a permission can fail if a conflicting permission already exists for the file/folder.

To update an existing permission, include both the Permission ID and the new roles to be assigned. roles is the only property that can be changed.

To remove permissions, pass in a blob containing only the permissions you wish to keep (which can be empty, to remove all).

Note that both reading and writing permissions require extra API calls, so if you don't need to read or write permissions it is recommended to omit --onedrive-metadata-permissions.

Metadata and permissions are supported for Folders (directories) as well as Files. Note that setting the mtime or btime on a Folder requires one extra API call on OneDrive Business only.

OneDrive does not currently support User Metadata. When writing metadata, only writeable system properties will be written -- any read-only or unrecognized keys passed in will be ignored.

TIP: to see the metadata and permissions for any file or folder, run:

rclone lsjson remote:path --stat -M --onedrive-metadata-permissions read

See the OneDrive backend docs for a table of all the supported metadata properties.
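For illustration, a "permissions" value granting a single user write access might look something like the following (the email address is a placeholder, and the exact schema differs slightly between OneDrive Personal and Business -- see the OneDrive backend docs for authoritative examples):

[
    {
        "grantedToIdentities": [
            {
                "user": {
                    "id": "alice@contoso.com"
                }
            }
        ],
        "roles": ["write"]
    }
]

A blob like this could be supplied at upload time with something along the lines of:

rclone copyto file.txt remote:file.txt -M --onedrive-metadata-permissions write --metadata-set 'permissions=[{"grantedToIdentities":[{"user":{"id":"alice@contoso.com"}}],"roles":["write"]}]'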
@@ -4,6 +4,7 @@ package onedrive

import (
"context"
_ "embed"
"encoding/base64"
"encoding/hex"
"encoding/json"
@@ -29,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -93,6 +95,9 @@ var (

// QuickXorHashType is the hash.Type for OneDrive
QuickXorHashType hash.Type

//go:embed metadata.md
metadataHelp string
)

// Register with Fs
@@ -103,6 +108,10 @@ func init() {
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: Config,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: metadataHelp,
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Choose national cloud region for OneDrive.",
@@ -173,7 +182,8 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access",
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
},
}}, {
},
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.

@@ -356,6 +366,16 @@ It is recommended if you are mounting your onedrive at the root
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
`, "|", "`"),
Advanced: true,
}, {
Name: "metadata_permissions",
Help: `Control whether permissions should be read or written in metadata.

Reading permissions metadata from files can be done quickly, but it
isn't always desirable to set the permissions from the metadata.
`,
Advanced: true,
Default: rwOff,
Examples: rwExamples,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -639,7 +659,8 @@ Examples:
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
Path: "/drives/" + finalDriveID + "/root",
}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
@@ -679,6 +700,7 @@ type Options struct {
AVOverride bool `config:"av_override"`
Delta bool `config:"delta"`
Enc encoder.MultiEncoder `config:"encoding"`
MetadataPermissions rwChoice `config:"metadata_permissions"`
}

// Fs represents a remote OneDrive
@@ -711,6 +733,17 @@ type Object struct {
id string // ID of the object
hash string // Hash of the content, usually QuickXorHash but set as hash_type
mimeType string // Content-Type of object from server (may not be as uploaded)
meta *Metadata // metadata properties
}

// Directory describes a OneDrive directory
type Directory struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64 // size of directory and contents or -1 if unknown
items int64 // number of objects or -1 for unknown
id string // dir ID
meta *Metadata // metadata properties
}

// ------------------------------------------------------------
@@ -751,8 +784,10 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}

var gatewayTimeoutError sync.Once
var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
var (
gatewayTimeoutError sync.Once
errAsyncJobAccessDenied = errors.New("async job failed - access denied")
)

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
@@ -969,10 +1004,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
hashType: QuickXorHashType,
}
f.features = (&fs.Features{
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
CaseInsensitive: true,
ReadMimeType: true,
WriteMimeType: false,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: false,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: false,
DirModTimeUpdatesOnWrite: false,
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

@@ -998,7 +1042,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})

// Get rootID
var rootID = opt.RootFolderID
rootID := opt.RootFolderID
if rootID == "" {
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
@@ -1065,6 +1109,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
o := &Object{
fs: f,
remote: remote,
meta: f.newMetadata(remote),
}
var err error
if info != nil {
@@ -1123,11 +1168,11 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
return shouldRetry(ctx, resp, err)
})
if err != nil {
//fmt.Printf("...Error %v\n", err)
// fmt.Printf("...Error %v\n", err)
return "", err
}

//fmt.Printf("...Id %q\n", *info.Id)
// fmt.Printf("...Id %q\n", *info.Id)
return info.GetID(), nil
}

@@ -1216,8 +1261,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
d := f.newDir(id, remote)
d.items = folder.ChildCount
f.setSystemMetadata(info, d.meta, remote, dirMimeType)
entry = d
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
@@ -1378,7 +1424,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}

return list.Flush()

}

// Shutdown shutdown the fs
@@ -1479,6 +1524,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
if f.driveType == driveTypePersonal {
return time.Millisecond
}
return time.Second
}

@@ -1618,12 +1666,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Copy does NOT copy the modTime from the source and there seems to
// be no way to set date before
// This will create TWO versions on OneDrive
err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx))

// Set modtime and adjust metadata if required
_, err = dstObj.Metadata(ctx) // make sure we get the correct new normalizedID
if err != nil {
return nil, err
}

return dstObj, nil
dstObj.meta.permsAddOnly = true // dst will have different IDs from src, so can't update/remove
info, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
return dstObj, err
}

// Purge deletes all the files in the directory
@@ -1678,12 +1733,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
},
// We set the mod time too as it gets reset otherwise
FileSystemInfo: &api.FileSystemInfoFacet{
CreatedDateTime: api.Timestamp(srcObj.modTime),
CreatedDateTime: api.Timestamp(srcObj.tryGetBtime(srcObj.modTime)),
LastModifiedDateTime: api.Timestamp(srcObj.modTime),
},
}
var resp *http.Response
var info api.Item
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(ctx, resp, err)
@@ -1692,11 +1747,18 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}

err = dstObj.setMetaData(&info)
err = dstObj.setMetaData(info)
if err != nil {
return nil, err
}
return dstObj, nil

// Set modtime and adjust metadata if required
info, err = f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
if err != nil {
return nil, err
}
err = dstObj.setMetaData(info)
return dstObj, err
}

// DirMove moves src, srcRemote to this remote at dstRemote
@@ -2032,6 +2094,7 @@ func (o *Object) Size() int64 {
// setMetaData sets the metadata from info
func (o *Object) setMetaData(info *api.Item) (err error) {
if info.GetFolder() != nil {
log.Stack(o, "setMetaData called on dir instead of obj")
return fs.ErrorIsDir
}
o.hasMetaData = true
@@ -2071,9 +2134,40 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
o.modTime = time.Time(info.GetLastModifiedDateTime())
}
o.id = info.GetID()
if o.meta == nil {
o.meta = o.fs.newMetadata(o.Remote())
}
o.fs.setSystemMetadata(info, o.meta, o.remote, o.mimeType)
return nil
}

// sets system metadata shared by both objects and directories
func (f *Fs) setSystemMetadata(info *api.Item, meta *Metadata, remote string, mimeType string) {
meta.fs = f
meta.remote = remote
meta.mimeType = mimeType
if info == nil {
fs.Errorf("setSystemMetadata", "internal error: info is nil")
}
fileSystemInfo := info.GetFileSystemInfo()
if fileSystemInfo != nil {
meta.mtime = time.Time(fileSystemInfo.LastModifiedDateTime)
meta.btime = time.Time(fileSystemInfo.CreatedDateTime)

} else {
meta.mtime = time.Time(info.GetLastModifiedDateTime())
meta.btime = time.Time(info.GetCreatedDateTime())
}
meta.utime = time.Time(info.GetCreatedDateTime())
meta.description = info.Description
meta.packageType = info.GetPackageType()
meta.createdBy = info.GetCreatedBy()
meta.lastModifiedBy = info.GetLastModifiedBy()
meta.malwareDetected = info.MalwareDetected()
meta.shared = info.Shared
meta.normalizedID = info.GetID()
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
@@ -2111,7 +2205,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
update := api.SetFileSystemInfo{
FileSystemInfo: api.FileSystemInfoFacet{
CreatedDateTime: api.Timestamp(modTime),
CreatedDateTime: api.Timestamp(o.tryGetBtime(modTime)),
LastModifiedDateTime: api.Timestamp(modTime),
},
}
@@ -2175,18 +2269,19 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}

if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
//Overwrite size with actual size since size readings from Onedrive is unreliable.
// Overwrite size with actual size since size readings from Onedrive is unreliable.
o.size = resp.ContentLength
}
return resp.Body, err
}

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, modTime time.Time) (response *api.CreateUploadResponse, err error) {
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
createRequest := api.CreateUploadRequest{}
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
createRequest, err := o.fetchMetadataForCreate(ctx, src, opts.Options, modTime)
if err != nil {
return nil, err
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
@@ -2237,7 +2332,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
var skip = int64(0)
skip := int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
toSend := chunkSize - skip
opts := rest.Opts{
@@ -2304,14 +2399,17 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
// if there is metadata, it will be set at the same time, except for permissions, which must be set after (if present and enabled).
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
modTime := src.ModTime(ctx)
if size <= 0 {
return nil, errors.New("unknown-sized upload not supported")
}

// Create upload session
fs.Debugf(o, "Starting multipart upload")
session, err := o.createUploadSession(ctx, modTime)
session, err := o.createUploadSession(ctx, src, modTime)
if err != nil {
return nil, err
}
@@ -2344,12 +2442,25 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
position += n
}

return info, nil
err = o.setMetaData(info)
if err != nil {
return info, err
}
if !o.fs.opt.MetadataPermissions.IsSet(rwWrite) {
return info, err
}
info, err = o.fs.fetchAndUpdatePermissions(ctx, src, options, o) // for permissions, which can't be set during original upload
if info == nil {
return nil, err
}
return info, o.setMetaData(info)
}

// Update the content of a remote file within 4 MiB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
// (currently only used when size is exactly 0)
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
}
@@ -2380,7 +2491,8 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
return nil, err
}
// Set the mod time now and read metadata
return o.setModTime(ctx, modTime)
info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
return info, o.setMetaData(info)
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -2395,17 +2507,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
defer o.fs.tokenRenewer.Stop()

size := src.Size()
modTime := src.ModTime(ctx)

var info *api.Item
if size > 0 {
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")
}
if err != nil {
fs.PrettyPrint(info, "info from Update error", fs.LogLevelDebug)
return err
}

@@ -2416,8 +2528,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Errorf(o, "Failed to remove versions: %v", err)
}
}

return o.setMetaData(info)
return nil
}

// Remove an object
@@ -2769,4 +2880,11 @@ var (
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Metadataer = (*Object)(nil)
_ fs.Metadataer = (*Directory)(nil)
_ fs.SetModTimer = (*Directory)(nil)
_ fs.SetMetadataer = (*Directory)(nil)
_ fs.MimeTyper = &Directory{}
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
)