From 393065148e5883442d73b90b21a98607675391bc Mon Sep 17 00:00:00 2001 From: JinnyYi Date: Wed, 17 Nov 2021 17:30:07 +0800 Subject: [PATCH 1/5] feat: Add glob support --- cmd/byctl/ls.go | 110 +++++++++++++--------- cmd/byctl/utils.go | 10 ++ docs/rfcs/96-add-glob-patterns-support.md | 27 +++--- go.mod | 1 + go.sum | 2 + operations/match.go | 34 +++++++ 6 files changed, 126 insertions(+), 58 deletions(-) create mode 100644 operations/match.go diff --git a/cmd/byctl/ls.go b/cmd/byctl/ls.go index 57d289c..1b6e5dd 100644 --- a/cmd/byctl/ls.go +++ b/cmd/byctl/ls.go @@ -47,14 +47,10 @@ var lsCmd = &cli.Command{ return err } - format := shortListFormat - if c.Bool("l") || c.String("format") == "long" { - format = longListFormat - } - - isFirstSrc := true - for i := 0; i < c.Args().Len(); i++ { - conn, path, err := cfg.ParseProfileInput(c.Args().Get(i)) + // parse args + var storePathMap = make(map[*operations.SingleOperator][]string) + for _, arg := range c.Args().Slice() { + conn, path, err := cfg.ParseProfileInput(arg) if err != nil { logger.Error("parse profile input", zap.Error(err)) continue @@ -68,52 +64,76 @@ var lsCmd = &cli.Command{ so := operations.NewSingleOperator(store) - ch, err := so.List(path) - if err != nil { - logger.Error("list", - zap.String("path", path), - zap.Error(err)) - continue - } - - // print src path if more than 1 arg - if c.Args().Len() > 1 { - if isFirstSrc { - isFirstSrc = false - } else { - fmt.Printf("\n") + var args []string + args = append(args, path) + if hasMeta(path) { + args, err = so.Glob(path) + if err != nil { + logger.Error("glob", zap.Error(err)) + fmt.Printf("ls: cannot access '%s': No such file or directory\n", path) + continue } - fmt.Printf("%s:\n", c.Args().Get(i)) } - isFirst := true - var totalNum int - var totalSize int64 + storePathMap[so] = args + } - for v := range ch { - if v.Error != nil { - logger.Error("read next result", zap.Error(v.Error)) - break - } + format := shortListFormat + if c.Bool("l") || c.String("format") == "long" { + format = longListFormat + } - oa := parseObject(v.Object) - fmt.Print(oa.Format(format, isFirst)) + isFirstSrc := true + for so, paths := range storePathMap { + for _, path := range paths { + ch, err := so.List(path) + if err != nil { + logger.Error("list", + zap.String("path", path), + zap.Error(err)) + continue + } - // Update isFirst - if isFirst { - isFirst = false + // print src path if more than 1 arg + if len(storePathMap) > 1 || len(paths) > 1 { + if isFirstSrc { + isFirstSrc = false + } else { + fmt.Printf("\n") + } + // so.StatStorager.Service + ":" + path + fmt.Printf("%s:\n", path) } - totalNum += 1 - totalSize += oa.size - } - // End of line - fmt.Print("\n") + isFirst := true + var totalNum int + var totalSize int64 - // display summary information - if c.Bool(lsFlagSummarize) { - fmt.Printf("\n%14s %d\n", "Total Objects:", totalNum) - fmt.Printf("%14s %s\n", "Total Size:", units.BytesSize(float64(totalSize))) + for v := range ch { + if v.Error != nil { + logger.Error("read next result", zap.Error(v.Error)) + break + } + + oa := parseObject(v.Object) + fmt.Print(oa.Format(format, isFirst)) + + // Update isFirst + if isFirst { + isFirst = false + } + + totalNum += 1 + totalSize += oa.size + } + // End of line + fmt.Print("\n") + + // display summary information + if c.Bool(lsFlagSummarize) { + fmt.Printf("\n%14s %d\n", "Total Objects:", totalNum) + fmt.Printf("%14s %s\n", "Total Size:", units.BytesSize(float64(totalSize))) + } } } return diff --git a/cmd/byctl/utils.go b/cmd/byctl/utils.go 
index e36ca9d..c15a207 100644
--- a/cmd/byctl/utils.go
+++ b/cmd/byctl/utils.go
@@ -2,6 +2,8 @@ package main
 
 import (
 	"fmt"
+	"runtime"
+	"strings"
 	"time"
 
 	"github.com/Xuanwo/go-bufferpool"
@@ -50,3 +52,11 @@ func parseLimit(text string) (types.Pair, error) {
 		}
 	}), nil
 }
+
+func hasMeta(path string) bool {
+	magicChars := `*?[{`
+	if runtime.GOOS != "windows" {
+		magicChars = `*?[{\`
+	}
+	return strings.ContainsAny(path, magicChars)
+}
diff --git a/docs/rfcs/96-add-glob-patterns-support.md b/docs/rfcs/96-add-glob-patterns-support.md
index 35eb797..67bbedd 100644
--- a/docs/rfcs/96-add-glob-patterns-support.md
+++ b/docs/rfcs/96-add-glob-patterns-support.md
@@ -7,9 +7,9 @@
 
 ## Background
 
-Globs, also known as glob patterns, are patterns that can expand a wildcard pattern into a list of path that match the given pattern.
+### Glob patterns
 
-In the command shell, a wildcard is a short textual pattern, that can match another character (or characters) in a file path. It’s kind of a shortcut that allows you to specify a whole set of related path names using a single, concise pattern.
+Globs, also known as glob patterns, are patterns that can expand a wildcard pattern into a list of paths that match the given pattern.
 
 A string can be considered a wildcard pattern if it contains one of the following characters (unquoted and unescaped): `*`, `?`, `[` or `{`:
 
@@ -22,6 +22,15 @@ A string can be considered a wildcard pattern if it contains one of the followin
 { } - (curly brackets) matches on any of a series of sub-patterns you specify, e.g. {a,b,c} matches one a, one b and one c.
 ```
 
+### Wildcards in cmd arguments
+
+In the command shell, a wildcard is a short textual pattern that can match another character (or characters) in a file path. It’s kind of a shortcut that allows you to specify a whole set of related path names using a single, concise pattern.
+
+When the shell sees any of these characters unquoted and unescaped in a command line argument:
+
+- It attempts to expand the argument by interpreting it as a path and matching the wildcard against all possible files in the path. The resulting set of file paths is then sent to the target command as a list of arguments.
+- Brace expansion `{...}` works differently from normal wildcards, in that the shell expands the braces before even looking for files: it actually generates all the permutations of the pattern you specify and then performs wildcard expansion on the results.
+
 ## Proposal
 
 I propose to add glob support by using UNIX style wildcards in the path arguments of the command.
@@ -36,18 +45,10 @@ Each wildcard will be evaluated against the source path. The following pattern s
 - {...}: Brace expansion, terms are separated by commas (without spaces) and each term must be the name of something or a wildcard.
 - \: Backslash, used as an "escape" character.
 
-**Notice:**
-Instead of expanding the braces before even looking for files, byctl attempts to determine whether the listed file name matches the file name pattern.
-
-Glob patterns can be used in the following commands:
+### Implementation
 
- cat
- cp
- ls
- mv
- rm
- stat
- sync
+- byctl will expand the argument by matching the wildcard against all possible file paths or objects in the service. The resulting set of file paths will then be sent to the target command as a list of arguments.
+- Instead of expanding the braces `{...}` before even looking for files, byctl attempts to determine whether each listed file name matches the file name pattern.
 
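To make the matching approach concrete, here is a minimal, self-contained sketch (an editorial illustration, not part of the patch) of the behavior the bullets above describe: a `hasMeta`-style check detects glob metacharacters in the argument, and each listed path is then tested with `doublestar.Match`, so `{...}` alternatives are handled by matching rather than by pre-expanding the braces. The candidate paths below are illustrative placeholders, not real objects from any service.

```go
package main

import (
	"fmt"
	"runtime"
	"strings"

	"github.com/bmatcuk/doublestar/v4"
)

// hasMeta reports whether path contains glob metacharacters.
// On Windows, `\` is a path separator, so it is not treated as an escape character.
func hasMeta(path string) bool {
	magicChars := `*?[{`
	if runtime.GOOS != "windows" {
		magicChars = `*?[{\`
	}
	return strings.ContainsAny(path, magicChars)
}

func main() {
	// Illustrative object paths; in byctl these would come from listing the service.
	candidates := []string{"logs/app-2021.log", "logs/app-2022.log", "data/report.csv"}

	pattern := "logs/app-{2021,2022}.log"
	if !hasMeta(pattern) {
		fmt.Println("not a glob, treat as a literal path:", pattern)
		return
	}
	for _, p := range candidates {
		// doublestar.Match understands `{...}` directly, so no brace
		// pre-expansion step is needed before matching.
		if ok, err := doublestar.Match(pattern, p); err == nil && ok {
			fmt.Println(p)
		}
	}
}
```

Run against these illustrative candidates, the sketch prints the two `logs/app-*.log` paths and skips `data/report.csv`, which is the listing-then-matching behavior the implementation bullets describe.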
### Examples diff --git a/go.mod b/go.mod index c45b955..f6e57ce 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.15 require ( github.com/BurntSushi/toml v0.4.1 github.com/Xuanwo/go-bufferpool v0.2.0 + github.com/bmatcuk/doublestar/v4 v4.0.2 github.com/docker/go-units v0.4.0 github.com/google/uuid v1.3.0 github.com/panjf2000/ants/v2 v2.4.6 diff --git a/go.sum b/go.sum index 43c12f5..decd81d 100644 --- a/go.sum +++ b/go.sum @@ -109,6 +109,8 @@ github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA= +github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= diff --git a/operations/match.go b/operations/match.go new file mode 100644 index 0000000..eeabe37 --- /dev/null +++ b/operations/match.go @@ -0,0 +1,34 @@ +package operations + +import ( + "path/filepath" + + "github.com/bmatcuk/doublestar/v4" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. +func (so *SingleOperator) Glob(path string) (matches []string, err error) { + unixPath := filepath.ToSlash(path) + base, _ := doublestar.SplitPattern(unixPath) + if base == "." 
{ + base = "" + } + + ch, err := so.ListRecursively(base) + if err != nil { + return + } + + for v := range ch { + if v.Error != nil { + return nil, v.Error + } + + if ok, _ := doublestar.Match(unixPath, v.Object.Path); ok { + matches = append(matches, v.Object.Path) + } + } + + return matches, nil +} From f4f0e0710d922cd288caed74b0424ac2982f9871 Mon Sep 17 00:00:00 2001 From: JinnyYi Date: Tue, 30 Nov 2021 20:05:05 +0800 Subject: [PATCH 2/5] Add glob support --- cmd/byctl/cat.go | 67 ++++++++++++--- cmd/byctl/cp.go | 203 ++++++++++++++++++++++++++------------------ cmd/byctl/ls.go | 115 +++++++++++++++---------- cmd/byctl/mv.go | 166 ++++++++++++++++++++++-------------- cmd/byctl/rm.go | 155 ++++++++++++++++++++------------- cmd/byctl/sign.go | 86 ++++++++++++++----- cmd/byctl/stat.go | 73 ++++++++++------ cmd/byctl/tee.go | 77 +++++++++++++---- operations/match.go | 6 +- operations/stat.go | 30 ++++++- 10 files changed, 644 insertions(+), 334 deletions(-) diff --git a/cmd/byctl/cat.go b/cmd/byctl/cat.go index 8c504c0..0dba54e 100644 --- a/cmd/byctl/cat.go +++ b/cmd/byctl/cat.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "github.com/urfave/cli/v2" @@ -30,35 +31,77 @@ var catCmd = &cli.Command{ return err } + var storePathMap = make(map[*operations.SingleOperator][]string) for i := 0; i < c.Args().Len(); i++ { - conn, key, err := cfg.ParseProfileInput(c.Args().Get(i)) + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) if err != nil { - logger.Error("parse profile input from src", zap.Error(err)) + logger.Error("parse profile input from target", zap.Error(err)) continue } store, err := services.NewStoragerFromString(conn) if err != nil { - logger.Error("init src storager", zap.Error(err), zap.String("conn string", conn)) + logger.Error("init target storager", zap.Error(err), zap.String("conn string", conn)) continue } so := operations.NewSingleOperator(store) - ch, err := so.CatFile(key) - if err != nil { - logger.Error("run cat", zap.Error(err)) - continue + if hasMeta(key) { + objects, err := so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", arg)) + continue + } + for _, o := range objects { + if o.Mode.IsDir() { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("cat: '%s': Is a directory\n", o.Path) + continue + } + storePathMap[so] = append(storePathMap[so], o.Path) + } + } else { + o, err := so.Stat(key) + if err != nil { + if errors.Is(err, services.ErrObjectNotExist) { + fmt.Printf("cat: '%s': No such file or directory\n", arg) + } else { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + } + continue + } else { + if o.Mode.IsDir() { + fmt.Printf("cat: '%s': Is a directory\n", arg) + continue + } else if o.Mode.IsPart() { + fmt.Printf("cat: '%s': Is an in progress multipart upload task\n", arg) + continue + } + } + err = nil + storePathMap[so] = append(storePathMap[so], key) } + } - for v := range ch { - if v.Error != nil { - logger.Error("cat", zap.Error(err)) + for so, paths := range storePathMap { + for _, path := range paths { + ch, err := so.CatFile(path) + if err != nil { + logger.Error("run cat", zap.Error(err)) continue } - } - fmt.Printf("\n") + for v := range ch { + if v.Error != nil { + logger.Error("cat", zap.Error(err)) + continue + } + } + + fmt.Printf("\n") + } } return nil diff --git a/cmd/byctl/cp.go b/cmd/byctl/cp.go index c2cdbdd..0160db6 100644 --- a/cmd/byctl/cp.go +++ b/cmd/byctl/cp.go @@ -58,38 +58,6 @@ var cpCmd = &cli.Command{ return err } - argsNum := 
c.Args().Len() - - dstConn, dstKey, err := cfg.ParseProfileInput(c.Args().Get(argsNum - 1)) - if err != nil { - logger.Error("parse profile input from dst", zap.Error(err)) - return err - } - - dst, err := services.NewStoragerFromString(dstConn) - if err != nil { - logger.Error("init dst storager", zap.Error(err), zap.String("conn string", dstConn)) - return err - } - - dstSo := operations.NewSingleOperator(dst) - - dstObject, err := dstSo.Stat(dstKey) - if err != nil { - if errors.Is(err, services.ErrObjectNotExist) { - err = nil - } else { - logger.Error("stat", zap.Error(err), zap.String("dst path", dstKey)) - return err - } - } - if argsNum > 2 { - if dstObject != nil && !dstObject.Mode.IsDir() { - fmt.Printf("cp: target '%s' is not a directory\n", dstKey) - return fmt.Errorf("cp: target '%s' is not a directory", dstKey) - } - } - // Handle read pairs. var readPairs []types.Pair if c.IsSet(flagReadSpeedLimitName) { @@ -127,79 +95,150 @@ var cpCmd = &cli.Command{ return err } - for i := 0; i < argsNum-1; i++ { - srcConn, srcKey, err := cfg.ParseProfileInput(c.Args().Get(i)) + // parse src args + srcNum := 0 + var storeObjectMap = make(map[types.Storager][]*types.Object) + for i := 0; i < c.Args().Len()-1; i++ { + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) if err != nil { logger.Error("parse profile input from src", zap.Error(err)) continue } - src, err := services.NewStoragerFromString(srcConn) - if err != nil { - logger.Error("init src storager", zap.Error(err), zap.String("conn string", srcConn)) - continue - } - - so := operations.NewSingleOperator(src) - - srcObject, err := so.Stat(srcKey) + store, err := services.NewStoragerFromString(conn) if err != nil { - logger.Error("stat", zap.String("path", srcKey), zap.Error(err)) + logger.Error("init src storager", zap.Error(err), zap.String("conn string", conn)) continue } - if srcObject.Mode.IsDir() && !c.Bool(cpFlagRecursive) { - fmt.Printf("cp: -r not specified; omitting directory '%s'\n", srcKey) - continue - } + so := operations.NewSingleOperator(store) - var size int64 - if srcObject.Mode.IsRead() { - n, ok := srcObject.GetContentLength() - if !ok { - logger.Error("can't get object content length", zap.String("path", srcKey)) + if hasMeta(key) { + objects, err := so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", arg)) continue } - size = n - } + for _, o := range objects { + if o.Mode.IsDir() && !c.Bool(cpFlagRecursive) { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("cp: -r not specified; omitting directory '%s'\n", o.Path) + continue + } + storeObjectMap[store] = append(storeObjectMap[store], o) + srcNum++ + } + } else { + o, err := so.Stat(key) + if err != nil && !errors.Is(err, services.ErrObjectNotExist) { + if errors.Is(err, services.ErrObjectNotExist) { + fmt.Printf("cp: cannot stat '%s': No such file or directory\n", arg) + } else { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + } + continue + } + if err == nil { + if o.Mode.IsDir() && !c.Bool(cpFlagRecursive) { + fmt.Printf("cp: -r not specified; omitting directory '%s'\n", arg) + continue + } else if o.Mode.IsPart() { + fmt.Printf("cp: cannot copy '%s': Is an in progress multipart upload task\n", arg) + continue + } + } - do := operations.NewDualOperator(src, dst) - if c.IsSet(flagWorkersName) { - do.WithWorkers(c.Int(flagWorkersName)) + err = nil + storeObjectMap[store] = append(storeObjectMap[store], o) + srcNum++ } + } - // set read pairs - do.WithReadPairs(readPairs...) 
- // set write pairs - do.WithWritePairs(writePairs...) + // check dst + dstConn, dstKey, err := cfg.ParseProfileInput(c.Args().Get(c.Args().Len() - 1)) + if err != nil { + logger.Error("parse profile input from dst", zap.Error(err)) + return err + } - realDstKey := dstKey - if argsNum > 2 || (dstObject != nil && dstObject.Mode.IsDir()) { - realDstKey = filepath.Join(dstKey, filepath.Base(srcKey)) - } + dst, err := services.NewStoragerFromString(dstConn) + if err != nil { + logger.Error("init dst storager", zap.Error(err), zap.String("conn string", dstConn)) + return err + } + + dstSo := operations.NewSingleOperator(dst) - var ch chan *operations.EmptyResult - if c.Bool(cpFlagRecursive) && srcObject.Mode.IsDir() { - ch, err = do.CopyRecursively(srcKey, realDstKey, multipartThreshold) - } else if size < multipartThreshold { - ch, err = do.CopyFileViaWrite(srcKey, realDstKey, size) + dstObject, err := dstSo.Stat(dstKey) + if err != nil { + if errors.Is(err, services.ErrObjectNotExist) { + err = nil } else { - // TODO: we will support other copy method later. - ch, err = do.CopyFileViaMultipart(srcKey, realDstKey, size) + logger.Error("stat", zap.Error(err), zap.String("dst path", dstKey)) + return err } - if err != nil { - logger.Error("start copy", - zap.String("src", srcKey), - zap.String("dst", realDstKey), - zap.Error(err)) - continue + } + if dstObject != nil { + if dstObject.Mode.IsPart() { + fmt.Printf("cp: target '%s' is an in progress multipart upload task\n", dstKey) + return fmt.Errorf("cp: target '%s' is an in progress multipart upload task", dstKey) } - - for v := range ch { - logger.Error("read next result", zap.Error(v.Error)) + if srcNum > 1 && !dstObject.Mode.IsDir() { + fmt.Printf("cp: target '%s' is not a directory\n", dstKey) + return fmt.Errorf("cp: target '%s' is not a directory", dstKey) } } + for store, objects := range storeObjectMap { + for _, o := range objects { + var size int64 + if o.Mode.IsRead() { + n, ok := o.GetContentLength() + if !ok { + logger.Error("can't get object content length", zap.String("path", o.Path)) + continue + } + size = n + } + + do := operations.NewDualOperator(store, dst) + if c.IsSet(flagWorkersName) { + do.WithWorkers(c.Int(flagWorkersName)) + } + + // set read pairs + do.WithReadPairs(readPairs...) + // set write pairs + do.WithWritePairs(writePairs...) + + realDstKey := dstKey + if srcNum > 1 || (dstObject != nil && dstObject.Mode.IsDir()) { + realDstKey = filepath.Join(dstKey, filepath.Base(o.Path)) + } + + var ch chan *operations.EmptyResult + if c.Bool(cpFlagRecursive) && o.Mode.IsDir() { + ch, err = do.CopyRecursively(o.Path, realDstKey, multipartThreshold) + } else if size < multipartThreshold { + ch, err = do.CopyFileViaWrite(o.Path, realDstKey, size) + } else { + // TODO: we will support other copy method later. 
+ ch, err = do.CopyFileViaMultipart(o.Path, realDstKey, size) + } + if err != nil { + logger.Error("start copy", + zap.String("src", o.Path), + zap.String("dst", realDstKey), + zap.Error(err)) + continue + } + + for v := range ch { + logger.Error("read next result", zap.Error(v.Error)) + } + } + } return }, } diff --git a/cmd/byctl/ls.go b/cmd/byctl/ls.go index 1b6e5dd..47b62b2 100644 --- a/cmd/byctl/ls.go +++ b/cmd/byctl/ls.go @@ -10,6 +10,7 @@ import ( "go.uber.org/zap" "go.beyondstorage.io/beyond-ctl/operations" + "go.beyondstorage.io/v5/pairs" "go.beyondstorage.io/v5/services" "go.beyondstorage.io/v5/types" ) @@ -48,8 +49,15 @@ var lsCmd = &cli.Command{ } // parse args - var storePathMap = make(map[*operations.SingleOperator][]string) - for _, arg := range c.Args().Slice() { + var args []string + if c.Args().Len() == 0 { + args = append(args, "") + } else { + args = c.Args().Slice() + } + + var storeObjectMap = make(map[*operations.SingleOperator][]*types.Object) + for _, arg := range args { conn, path, err := cfg.ParseProfileInput(arg) if err != nil { logger.Error("parse profile input", zap.Error(err)) @@ -64,18 +72,30 @@ var lsCmd = &cli.Command{ so := operations.NewSingleOperator(store) - var args []string - args = append(args, path) + var objects []*types.Object if hasMeta(path) { - args, err = so.Glob(path) + objects, err = so.Glob(path) if err != nil { logger.Error("glob", zap.Error(err)) fmt.Printf("ls: cannot access '%s': No such file or directory\n", path) continue } - } + storeObjectMap[so] = objects + } else { + var o *types.Object + if path == "" { + o = store.Create(path, pairs.WithObjectMode(types.ModeDir)) + } else { + o, err = so.Stat(path) + if err != nil { + logger.Error("stat", zap.Error(err)) + fmt.Printf("stat: cannot access '%s': No such file or directory\n", path) + continue + } + } - storePathMap[so] = args + storeObjectMap[so] = append(storeObjectMap[so], o) + } } format := shortListFormat @@ -84,55 +104,62 @@ var lsCmd = &cli.Command{ } isFirstSrc := true - for so, paths := range storePathMap { - for _, path := range paths { - ch, err := so.List(path) - if err != nil { - logger.Error("list", - zap.String("path", path), - zap.Error(err)) - continue - } - + for so, objects := range storeObjectMap { + for _, o := range objects { // print src path if more than 1 arg - if len(storePathMap) > 1 || len(paths) > 1 { + if len(storeObjectMap) > 1 || len(objects) > 1 { if isFirstSrc { isFirstSrc = false } else { fmt.Printf("\n") } - // so.StatStorager.Service + ":" + path - fmt.Printf("%s:\n", path) + //so.StatStorager().Service + ":" + path + fmt.Printf("%s:\n", o.Path) } - isFirst := true - var totalNum int - var totalSize int64 - - for v := range ch { - if v.Error != nil { - logger.Error("read next result", zap.Error(v.Error)) - break + if o.Mode.IsDir() { + ch, err := so.List(o.Path) + if err != nil { + logger.Error("list", + zap.String("path", o.Path), + zap.Error(err)) + continue } - oa := parseObject(v.Object) - fmt.Print(oa.Format(format, isFirst)) + isFirst := true + var totalNum int + var totalSize int64 - // Update isFirst - if isFirst { - isFirst = false - } + for v := range ch { + if v.Error != nil { + logger.Error("read next result", zap.Error(v.Error)) + break + } - totalNum += 1 - totalSize += oa.size - } - // End of line - fmt.Print("\n") + oa := parseObject(v.Object) + fmt.Print(oa.Format(format, isFirst)) - // display summary information - if c.Bool(lsFlagSummarize) { - fmt.Printf("\n%14s %d\n", "Total Objects:", totalNum) - fmt.Printf("%14s %s\n", 
"Total Size:", units.BytesSize(float64(totalSize))) + // Update isFirst + if isFirst { + isFirst = false + } + + totalNum += 1 + totalSize += oa.size + } + // End of line + fmt.Print("\n") + + // display summary information + if c.Bool(lsFlagSummarize) { + fmt.Printf("\n%14s %d\n", "Total Objects:", totalNum) + fmt.Printf("%14s %s\n", "Total Size:", units.BytesSize(float64(totalSize))) + } + } else { + oa := parseObject(o) + fmt.Print(oa.Format(format, true)) + // End of line + fmt.Print("\n") } } } @@ -184,6 +211,8 @@ func (oa objectAttr) longFormat(isFirst bool) string { } else if oa.mode.IsDir() { // Keep align with read. buf.AppendString("dir ") + } else if oa.mode.IsPart() { + buf.AppendString("part") } // FIXME: it's hard to calculate the padding, so we hardcoded the padding here. buf.AppendString(fmt.Sprintf("%12d", oa.size)) diff --git a/cmd/byctl/mv.go b/cmd/byctl/mv.go index 474c1e7..2ebf732 100644 --- a/cmd/byctl/mv.go +++ b/cmd/byctl/mv.go @@ -3,6 +3,7 @@ package main import ( "errors" "fmt" + "github.com/docker/go-units" "github.com/urfave/cli/v2" "go.uber.org/zap" @@ -94,9 +95,68 @@ var mvCmd = &cli.Command{ return err } - args := c.Args().Len() + // parse src args + srcNum := 0 + var storeObjectMap = make(map[types.Storager][]*types.Object) + for i := 0; i < c.Args().Len()-1; i++ { + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) + if err != nil { + logger.Error("parse profile input from src", zap.Error(err)) + continue + } + + store, err := services.NewStoragerFromString(conn) + if err != nil { + logger.Error("init src storager", zap.Error(err), zap.String("conn string", conn)) + continue + } + + so := operations.NewSingleOperator(store) - dstConn, dstKey, err := cfg.ParseProfileInput(c.Args().Get(args - 1)) + if hasMeta(key) { + objects, err := so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", arg)) + continue + } + for _, o := range objects { + if o.Mode.IsDir() && !c.Bool(mvFlagRecursive) { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("mv: -r not specified; omitting directory '%s'\n", o.Path) + continue + } + storeObjectMap[store] = append(storeObjectMap[store], o) + srcNum++ + } + } else { + o, err := so.Stat(key) + if err != nil && !errors.Is(err, services.ErrObjectNotExist) { + if errors.Is(err, services.ErrObjectNotExist) { + fmt.Printf("mv: cannot stat '%s': No such file or directory\n", arg) + } else { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + } + continue + } + if err == nil { + if o.Mode.IsDir() && !c.Bool(mvFlagRecursive) { + fmt.Printf("mv: -r not specified; omitting directory '%s'\n", arg) + continue + } else if o.Mode.IsPart() { + fmt.Printf("mv: cannot move '%s': Is an in progress multipart upload task\n", arg) + continue + } + } + + err = nil + storeObjectMap[store] = append(storeObjectMap[store], o) + srcNum++ + } + } + + // check dst + dstConn, dstKey, err := cfg.ParseProfileInput(c.Args().Get(c.Args().Len() - 1)) if err != nil { logger.Error("parse profile input from dst", zap.Error(err)) return err @@ -119,80 +179,60 @@ var mvCmd = &cli.Command{ return err } } - if args > 2 { - if dstObject != nil && !dstObject.Mode.IsDir() { + if dstObject != nil { + if dstObject.Mode.IsPart() { + fmt.Printf("mv: target '%s' is an in progress multipart upload task\n", dstKey) + return fmt.Errorf("mv: target '%s' is an in progress multipart upload task", dstKey) + } + if srcNum > 1 && !dstObject.Mode.IsDir() { fmt.Printf("mv: target '%s' is not a directory\n", dstKey) 
return fmt.Errorf("mv: target '%s' is not a directory", dstKey) } } - for i := 0; i < args-1; i++ { - srcConn, srcKey, err := cfg.ParseProfileInput(c.Args().Get(i)) - if err != nil { - logger.Error("parse profile input from src", zap.Error(err)) - continue - } + for store, objects := range storeObjectMap { + for _, o := range objects { + var size int64 + if o.Mode.IsRead() { + n, ok := o.GetContentLength() + if !ok { + logger.Error("can't get object content length", zap.String("path", o.Path)) + continue + } + size = n + } - src, err := services.NewStoragerFromString(srcConn) - if err != nil { - logger.Error("init src storager", zap.Error(err), zap.String("conn string", srcConn)) - continue - } + do := operations.NewDualOperator(store, dst) + if c.IsSet(flagWorkersName) { + do.WithWorkers(c.Int(flagWorkersName)) + } - so := operations.NewSingleOperator(src) + // set read pairs + do.WithReadPairs(readPairs...) + // set write pairs + do.WithWritePairs(writePairs...) - srcObject, err := so.Stat(srcKey) - if err != nil { - logger.Error("stat", zap.String("path", srcKey), zap.Error(err)) - continue - } - - if srcObject.Mode.IsDir() && !c.Bool(cpFlagRecursive) { - fmt.Printf("mv: -r not specified; omitting directory '%s'\n", srcKey) - continue - } + realDstKey := dstKey + if srcNum > 1 || (dstObject != nil && dstObject.Mode.IsDir()) { + realDstKey = filepath.Join(dstKey, filepath.Base(o.Path)) + } - var size int64 - if srcObject.Mode.IsRead() { - n, ok := srcObject.GetContentLength() - if !ok { - logger.Error("can't get object content length", zap.String("path", srcKey)) + if c.Bool(mvFlagRecursive) && o.Mode.IsDir() { + err = do.MoveRecursively(o.Path, realDstKey, multipartThreshold) + } else if size < multipartThreshold { + err = do.MoveFileViaWrite(o.Path, realDstKey, size) + } else { + err = do.MoveFileViaMultipart(o.Path, realDstKey, size) + } + if err != nil { + logger.Error("start move", + zap.String("src", o.Path), + zap.String("dst", realDstKey), + zap.Error(err)) continue } - size = n - } - - do := operations.NewDualOperator(src, dst) - if c.IsSet(flagWorkersName) { - do.WithWorkers(c.Int(flagWorkersName)) - } - - // set read pairs - do.WithReadPairs(readPairs...) - // set write pairs - do.WithWritePairs(writePairs...) 
- - realDstKey := dstKey - if args > 2 || (dstObject != nil && dstObject.Mode.IsDir()) { - realDstKey = filepath.Join(dstKey, filepath.Base(srcKey)) - } - - if c.Bool(mvFlagRecursive) && srcObject.Mode.IsDir() { - err = do.MoveRecursively(srcKey, realDstKey, multipartThreshold) - } else if size < multipartThreshold { - err = do.MoveFileViaWrite(srcKey, realDstKey, size) - } else { - err = do.MoveFileViaMultipart(srcKey, realDstKey, size) - } - if err != nil { - logger.Error("start move", - zap.String("src", srcKey), - zap.String("dst", realDstKey), - zap.Error(err)) - continue } } - return }, } diff --git a/cmd/byctl/rm.go b/cmd/byctl/rm.go index 2957d04..ef0befe 100644 --- a/cmd/byctl/rm.go +++ b/cmd/byctl/rm.go @@ -9,6 +9,7 @@ import ( "go.beyondstorage.io/beyond-ctl/operations" "go.beyondstorage.io/v5/services" + "go.beyondstorage.io/v5/types" ) const ( @@ -51,8 +52,10 @@ var rmCmd = &cli.Command{ return err } + var storeObjectMap = make(map[*operations.SingleOperator][]*types.Object) for i := 0; i < c.Args().Len(); i++ { - conn, key, err := cfg.ParseProfileInput(c.Args().Get(i)) + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) if err != nil { logger.Error("parse profile input from src", zap.Error(err)) continue @@ -66,80 +69,110 @@ var rmCmd = &cli.Command{ so := operations.NewSingleOperator(store) - if c.Bool(rmFlagMultipart) && !c.Bool(rmFlagRecursive) { - // Remove all multipart objects whose path is `key` - ch, err := so.DeleteMultipart(key) + if hasMeta(key) { + objects, err := so.Glob(key) if err != nil { - logger.Error("delete multipart", - zap.String("path", key), - zap.Error(err)) + logger.Error("glob", zap.Error(err), zap.String("path", arg)) continue } - - if ch != nil { - for v := range ch { - if v.Error != nil { - logger.Error("delete", zap.Error(err)) - continue - } + for _, o := range objects { + if o.Mode.IsDir() && !c.Bool(rmFlagRecursive) { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("rm: cannot remove '%s': Is a directory\n", o.Path) + continue } + storeObjectMap[so] = append(storeObjectMap[so], o) } - } else if c.Bool(rmFlagMultipart) && c.Bool(rmFlagRecursive) { - // Remove all multipart objects prefixed with `key`. - ch, err := so.DeleteMultipartViaRecursively(key) - if err != nil { - logger.Error("delete multipart recursively", - zap.String("path", key), - zap.Error(err)) + } else { + o, err := so.Stat(key) + if err != nil && !errors.Is(err, services.ErrObjectNotExist) { + if errors.Is(err, services.ErrObjectNotExist) { + fmt.Printf("rm: cannot remove '%s': No such file or directory\n", arg) + } else { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + } continue } + if err == nil { + if o.Mode.IsDir() && !c.Bool(rmFlagRecursive) { + fmt.Printf("rm: cannot remove '%s': Is a directory\n", arg) + continue + } else if o.Mode.IsPart() && !c.Bool(rmFlagMultipart) { + fmt.Printf("rm: cannot remove '%s': Is an in progress multipart upload task\n", arg) + continue + } + } - if ch != nil { - for v := range ch { - if v.Error != nil { - logger.Error("delete", zap.Error(err)) - continue + err = nil + storeObjectMap[so] = append(storeObjectMap[so], o) + } + } + + for so, objects := range storeObjectMap { + for _, o := range objects { + if o.Mode.IsDir() { + // recursive remove a dir. 
+ ch, err := so.DeleteRecursively(o.Path) + if err != nil { + logger.Error("delete recursively", + zap.String("path", o.Path), + zap.Error(err)) + continue + } + + if ch != nil { + for v := range ch { + if v.Error != nil { + logger.Error("delete", zap.Error(err)) + continue + } } } - } - } else if !c.Bool(rmFlagMultipart) && c.Bool(rmFlagRecursive) { - // recursive remove a dir. - ch, err := so.DeleteRecursively(key) - if err != nil { - logger.Error("delete recursively", - zap.String("path", key), - zap.Error(err)) - continue - } + } else if o.Mode.IsPart() { + if !c.Bool(rmFlagRecursive) { + // Remove all multipart objects whose path is `key` + ch, err := so.DeleteMultipart(o.Path) + if err != nil { + logger.Error("delete multipart", + zap.String("path", o.Path), + zap.Error(err)) + continue + } - if ch != nil { - for v := range ch { - if v.Error != nil { - logger.Error("delete", zap.Error(err)) + if ch != nil { + for v := range ch { + if v.Error != nil { + logger.Error("delete", zap.Error(err)) + continue + } + } + } + } else { + // Remove all multipart objects prefixed with `key`. + ch, err := so.DeleteMultipartViaRecursively(o.Path) + if err != nil { + logger.Error("delete multipart recursively", + zap.String("path", o.Path), + zap.Error(err)) continue } - } - } - } else { - // remove single file - o, err := so.Stat(key) - if err != nil && errors.Is(err, services.ErrObjectNotExist) { - fmt.Printf("rm: cannot remove '%s': No such file or directory\n", key) - continue - } - if err != nil { - logger.Error("stat", zap.String("path", key), zap.Error(err)) - continue - } - if o.Mode.IsDir() { - fmt.Printf("rm: cannot remove '%s': Is a directory\n", key) - continue - } - err = so.Delete(key) - if err != nil { - logger.Error("delete", zap.String("path", key), zap.Error(err)) - continue + if ch != nil { + for v := range ch { + if v.Error != nil { + logger.Error("delete", zap.Error(err)) + continue + } + } + } + } + } else { + // remove single file + err = so.Delete(o.Path) + if err != nil { + logger.Error("delete", zap.String("path", o.Path), zap.Error(err)) + continue + } } } } diff --git a/cmd/byctl/sign.go b/cmd/byctl/sign.go index 3f4848d..d39ccff 100644 --- a/cmd/byctl/sign.go +++ b/cmd/byctl/sign.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "time" @@ -43,44 +44,85 @@ var signCmd = &cli.Command{ return err } - isFirst := true - args := c.Args().Len() - for i := 0; i < args; i++ { - conn, key, err := cfg.ParseProfileInput(c.Args().Get(i)) + var storePathMap = make(map[*operations.SingleOperator][]string) + for i := 0; i < c.Args().Len(); i++ { + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) if err != nil { - logger.Error("parse profile input from source", zap.Error(err)) + logger.Error("parse profile input from target", zap.Error(err)) continue } store, err := services.NewStoragerFromString(conn) if err != nil { - logger.Error("init source storager", zap.Error(err), zap.String("conn string", conn)) + logger.Error("init target storager", zap.Error(err), zap.String("conn string", conn)) continue } so := operations.NewSingleOperator(store) - // The default is 300 second. 
- second := c.Int(signFlagExpire) - expire := time.Duration(second) * time.Second - - url, err := so.Sign(key, expire) - if err != nil { - logger.Error("run sign", zap.Error(err)) - continue - } - - if args > 1 { - if isFirst { - isFirst = false + if hasMeta(key) { + objects, err := so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", arg)) + continue + } + for _, o := range objects { + if o.Mode.IsDir() { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("sign: '%s': Is a directory\n", o.Path) + continue + } + storePathMap[so] = append(storePathMap[so], o.Path) + } + } else { + o, err := so.Stat(key) + if err != nil { + if errors.Is(err, services.ErrObjectNotExist) { + fmt.Printf("sign: '%s': No such file or directory\n", arg) + } else { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + } + continue } else { - fmt.Printf("\n") + if o.Mode.IsDir() { + fmt.Printf("sign: '%s': Is a directory\n", arg) + continue + } else if o.Mode.IsPart() { + fmt.Printf("sign: '%s': Is an in progress multipart upload task\n", arg) + continue + } } - fmt.Printf("%s:\n", c.Args().Get(i)) + err = nil + storePathMap[so] = append(storePathMap[so], key) } - fmt.Println(url) } + // The default is 300 second. + second := c.Int(signFlagExpire) + expire := time.Duration(second) * time.Second + + isFirst := true + for so, paths := range storePathMap { + for _, path := range paths { + url, err := so.Sign(path, expire) + if err != nil { + logger.Error("run sign", zap.Error(err)) + continue + } + + if len(paths) > 1 { + if isFirst { + isFirst = false + } else { + fmt.Printf("\n") + } + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("%s:\n", path) + } + fmt.Println(url) + } + } return nil }, } diff --git a/cmd/byctl/stat.go b/cmd/byctl/stat.go index e85d921..eac0bde 100644 --- a/cmd/byctl/stat.go +++ b/cmd/byctl/stat.go @@ -46,9 +46,13 @@ var statCmd = &cli.Command{ return err } + format := normalFormat + if c.Bool(statFlagJson) { + format = jsonFormat + } + isFirst := true - args := c.Args().Len() - for i := 0; i < args; i++ { + for i := 0; i < c.Args().Len(); i++ { conn, key, err := cfg.ParseProfileInput(c.Args().Get(i)) if err != nil { logger.Error("parse profile input from src", zap.Error(err)) @@ -63,11 +67,6 @@ var statCmd = &cli.Command{ so := operations.NewSingleOperator(store) - format := normalFormat - if c.Bool(statFlagJson) { - format = jsonFormat - } - var out string if key == "" { meta := so.StatStorager() @@ -78,35 +77,53 @@ var statCmd = &cli.Command{ logger.Error("format storager", zap.Error(err)) continue } - } else { - o, err := so.Stat(key) - if err != nil { - logger.Error("stat", zap.Error(err)) - continue - } - - fm, err := parseFileObject(o) - if err != nil { - logger.Error("parse file object", zap.Error(err)) - continue - } - - out, err = fm.FormatFile(format) - if err != nil { - logger.Error("format file", zap.Error(err)) - continue - } - } - if args > 1 { if isFirst { isFirst = false } else { fmt.Printf("\n") } fmt.Printf("%s\n", c.Args().Get(i)) + fmt.Println(out) + } else { + var objects []*types.Object + if hasMeta(key) { + objects, err = so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", key)) + continue + } + } else { + o, err := so.Stat(key) + if err != nil { + logger.Error("stat", zap.Error(err)) + continue + } + objects = append(objects, o) + } + + for _, o := range objects { + fm, err := parseFileObject(o) + if err != nil { + logger.Error("parse file object", zap.Error(err)) + 
continue + } + + out, err = fm.FormatFile(format) + if err != nil { + logger.Error("format file", zap.Error(err)) + continue + } + if isFirst { + isFirst = false + } else { + fmt.Printf("\n") + } + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("%s\n", strings.SplitN(c.Args().Get(i), ":", 2)[0]+":"+o.Path) + fmt.Println(out) + } } - fmt.Println(out) } return diff --git a/cmd/byctl/tee.go b/cmd/byctl/tee.go index d67f97b..d90e6aa 100644 --- a/cmd/byctl/tee.go +++ b/cmd/byctl/tee.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "errors" "fmt" "github.com/docker/go-units" @@ -44,15 +45,16 @@ var teeCmd = &cli.Command{ return err } - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(c.App.Reader) + expectedSize, err := units.RAMInBytes(c.String(teeFlagExpectSize)) if err != nil { - logger.Error("read data", zap.Error(err)) + logger.Error("expected-size is invalid", zap.String("input", c.String(teeFlagExpectSize)), zap.Error(err)) return err } + var storePathMap = make(map[*operations.SingleOperator][]string) for i := 0; i < c.Args().Len(); i++ { - conn, key, err := cfg.ParseProfileInput(c.Args().Get(i)) + arg := c.Args().Get(i) + conn, key, err := cfg.ParseProfileInput(arg) if err != nil { logger.Error("parse profile input from target", zap.Error(err)) continue @@ -66,26 +68,65 @@ var teeCmd = &cli.Command{ so := operations.NewSingleOperator(store) - expectedSize, err := units.RAMInBytes(c.String(teeFlagExpectSize)) - if err != nil { - logger.Error("expected-size is invalid", zap.String("input", c.String(teeFlagExpectSize)), zap.Error(err)) - continue + if hasMeta(key) { + objects, err := so.Glob(key) + if err != nil { + logger.Error("glob", zap.Error(err), zap.String("path", arg)) + continue + } + for _, o := range objects { + if o.Mode.IsDir() { + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("tee: '%s': Is a directory\n", o.Path) + continue + } + storePathMap[so] = append(storePathMap[so], o.Path) + } + } else { + o, err := so.Stat(key) + if err == nil { + if o.Mode.IsDir() { + fmt.Printf("tee: '%s': Is a directory\n", arg) + continue + } else if o.Mode.IsPart() { + fmt.Printf("tee: '%s': Is an in progress multipart upload task\n", arg) + continue + } + } + if err != nil && !errors.Is(err, services.ErrObjectNotExist) { + logger.Error("stat", zap.Error(err), zap.String("path", arg)) + continue + } + err = nil + storePathMap[so] = append(storePathMap[so], key) } + } - ch, err := so.TeeRun(key, expectedSize, bytes.NewReader(buf.Bytes())) - if err != nil { - logger.Error("run tee", zap.Error(err)) - continue - } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(c.App.Reader) + if err != nil { + logger.Error("read data", zap.Error(err)) + return err + } - for v := range ch { - if v.Error != nil { - logger.Error("tee", zap.Error(err)) + for so, paths := range storePathMap { + for _, path := range paths { + ch, err := so.TeeRun(path, expectedSize, bytes.NewReader(buf.Bytes())) + if err != nil { + logger.Error("run tee", zap.Error(err)) continue } - } - fmt.Printf("Stdin is saved to <%s>\n", key) + for v := range ch { + if v.Error != nil { + logger.Error("tee", zap.Error(err)) + continue + } + } + + // so.StatStorager().Service + ":" + o.Path + fmt.Printf("Stdin is saved to <%s>\n", path) + } } return nil diff --git a/operations/match.go b/operations/match.go index eeabe37..b4413ff 100644 --- a/operations/match.go +++ b/operations/match.go @@ -4,11 +4,13 @@ import ( "path/filepath" "github.com/bmatcuk/doublestar/v4" + + "go.beyondstorage.io/v5/types" ) // Glob returns the 
names of all files matching pattern or nil // if there is no matching file. -func (so *SingleOperator) Glob(path string) (matches []string, err error) { +func (so *SingleOperator) Glob(path string) (matches []*types.Object, err error) { unixPath := filepath.ToSlash(path) base, _ := doublestar.SplitPattern(unixPath) if base == "." { @@ -26,7 +28,7 @@ func (so *SingleOperator) Glob(path string) (matches []string, err error) { } if ok, _ := doublestar.Match(unixPath, v.Object.Path); ok { - matches = append(matches, v.Object.Path) + matches = append(matches, v.Object) } } diff --git a/operations/stat.go b/operations/stat.go index b5f05f7..36e3e88 100644 --- a/operations/stat.go +++ b/operations/stat.go @@ -12,6 +12,7 @@ import ( func (so *SingleOperator) Stat(path string) (o *types.Object, err error) { o, err = so.store.Stat(path) + // so.store.Features().VirtualDir if err != nil && errors.Is(err, services.ErrObjectNotExist) { it, cerr := so.store.List(path, pairs.WithListMode(types.ListModeDir)) if cerr == nil { @@ -25,10 +26,9 @@ func (so *SingleOperator) Stat(path string) (o *types.Object, err error) { err = cerr break } - if (obj.Mode.IsDir() && strings.TrimSuffix(obj.Path, "/") == strings.TrimSuffix(path, "/")) || + if (obj.Mode.IsDir() && strings.HasPrefix(obj.Path, strings.TrimSuffix(path, "/")+"/")) || (!obj.Mode.IsDir() && strings.HasPrefix(obj.Path, strings.TrimSuffix(path, "/")+"/")) { - o = so.store.Create(path) - o.Mode = types.ModeDir + o = so.store.Create(path, pairs.WithObjectMode(types.ModeDir)) err = nil break } @@ -36,6 +36,30 @@ func (so *SingleOperator) Stat(path string) (o *types.Object, err error) { } } + // in progress multipart upload + if err != nil && errors.Is(err, services.ErrObjectNotExist) { + // so.store.Features().CreateMultipart + if _, ok := so.store.(types.Multiparter); ok { + it, cerr := so.store.List(path, pairs.WithListMode(types.ListModePart)) + if cerr == nil { + for { + obj, cerr := it.Next() + if cerr != nil { + if !errors.Is(cerr, types.IterateDone) { + err = cerr + } + break + } + if obj.Path == path { + o = so.store.Create(path, pairs.WithMultipartID(obj.MustGetMultipartID())) + err = nil + break + } + } + } + } + } + if err != nil { return nil, err } From e8db6a5327f3b347401f2f94e095dc16a877e5e7 Mon Sep 17 00:00:00 2001 From: JinnyYi Date: Tue, 30 Nov 2021 20:56:31 +0800 Subject: [PATCH 3/5] Fix ls error --- cmd/byctl/ls.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/byctl/ls.go b/cmd/byctl/ls.go index 47b62b2..795f7ab 100644 --- a/cmd/byctl/ls.go +++ b/cmd/byctl/ls.go @@ -80,7 +80,7 @@ var lsCmd = &cli.Command{ fmt.Printf("ls: cannot access '%s': No such file or directory\n", path) continue } - storeObjectMap[so] = objects + storeObjectMap[so] = append(storeObjectMap[so], objects...) 
} else { var o *types.Object if path == "" { From 906af90db584bb257d5a4e609876f7cec658cfd4 Mon Sep 17 00:00:00 2001 From: JinnyYi Date: Tue, 30 Nov 2021 21:26:35 +0800 Subject: [PATCH 4/5] Fix ls error --- operations/stat.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/operations/stat.go b/operations/stat.go index 36e3e88..69fc493 100644 --- a/operations/stat.go +++ b/operations/stat.go @@ -28,7 +28,8 @@ func (so *SingleOperator) Stat(path string) (o *types.Object, err error) { } if (obj.Mode.IsDir() && strings.HasPrefix(obj.Path, strings.TrimSuffix(path, "/")+"/")) || (!obj.Mode.IsDir() && strings.HasPrefix(obj.Path, strings.TrimSuffix(path, "/")+"/")) { - o = so.store.Create(path, pairs.WithObjectMode(types.ModeDir)) + o = so.store.Create(path) + o.Mode = types.ModeDir err = nil break } From 650aaeb08d009921685cc93be333a680a068091a Mon Sep 17 00:00:00 2001 From: JinnyYi Date: Tue, 30 Nov 2021 21:35:53 +0800 Subject: [PATCH 5/5] Fix ls error --- cmd/byctl/ls.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/byctl/ls.go b/cmd/byctl/ls.go index 795f7ab..37dc33c 100644 --- a/cmd/byctl/ls.go +++ b/cmd/byctl/ls.go @@ -10,7 +10,6 @@ import ( "go.uber.org/zap" "go.beyondstorage.io/beyond-ctl/operations" - "go.beyondstorage.io/v5/pairs" "go.beyondstorage.io/v5/services" "go.beyondstorage.io/v5/types" ) @@ -84,7 +83,8 @@ var lsCmd = &cli.Command{ } else { var o *types.Object if path == "" { - o = store.Create(path, pairs.WithObjectMode(types.ModeDir)) + o = store.Create(path) + o.Mode = types.ModeDir } else { o, err = so.Stat(path) if err != nil {