fix(files): disable auto renaming and clean path from request

hexxa 2022-04-03 15:50:18 +08:00 committed by Hexxa
parent 4de972cb11
commit 0e77cfe335
8 changed files with 108 additions and 87 deletions

View file

@@ -79,7 +79,7 @@ func (bs *BoltStore) getUploadInfo(tx *bolt.Tx, userID uint64, itemPath string)
     uploadInfoBucket := tx.Bucket([]byte(userUploadNS))
     if uploadInfoBucket == nil {
-        return nil, bolt.ErrBucketNotFound
+        return nil, db.ErrBucketNotFound
     }
@@ -169,6 +169,14 @@ func (bs *BoltStore) setFileInfo(tx *bolt.Tx, userID uint64, itemPath string, fi
 func (bs *BoltStore) AddUploadInfos(userID uint64, tmpPath, filePath string, info *db.FileInfo) error {
     return bs.boltdb.Update(func(tx *bolt.Tx) error {
         var err error
+        _, err = bs.getUploadInfo(tx, userID, tmpPath)
+        if err == nil {
+            return db.ErrKeyExisting
+        } else if !errors.Is(err, db.ErrBucketNotFound) && !errors.Is(err, db.ErrKeyNotFound) {
+            return err
+        }
         // check space quota
         userInfo, err := bs.getUserInfo(tx, userID)
         if err != nil {
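The new guard makes AddUploadInfos reject a second upload record for the same temp path: a successful lookup means the record already exists (db.ErrKeyExisting), and only the two "not found" sentinels let creation continue. Below is a minimal, self-contained sketch of the same sentinel-error pattern; errKeyNotFound, errKeyExisting, and addOnce are illustrative stand-ins, not the project's API.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative sentinels, mirroring db.ErrKeyNotFound / db.ErrKeyExisting.
var (
	errKeyNotFound = errors.New("key not found")
	errKeyExisting = errors.New("key is existing")
)

// addOnce only proceeds with creation when the prior lookup reports "not found";
// a successful lookup is a conflict, anything else is an unexpected failure.
func addOnce(lookup func() error) error {
	err := lookup()
	if err == nil {
		return errKeyExisting // entry already there: reject the duplicate
	}
	if !errors.Is(err, errKeyNotFound) {
		return err // real storage failure, propagate it
	}
	return nil // safe to create
}

func main() {
	fmt.Println(addOnce(func() error { return nil }))            // key is existing
	fmt.Println(addOnce(func() error { return errKeyNotFound })) // <nil>
}
```

Checking err == nil first and then filtering with errors.Is keeps genuine storage failures from being mistaken for a missing key.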

View file

@@ -31,6 +31,7 @@ var (
     ErrInvalidPreferences = errors.New("invalid preferences")
     ErrBucketNotFound     = errors.New("bucket not found")
     ErrKeyNotFound        = errors.New("key not found")
+    ErrKeyExisting        = errors.New("key is existing")
     ErrCreateExisting     = errors.New("create upload info which already exists")
     ErrQuota              = errors.New("quota limit reached")

View file

@@ -39,7 +39,7 @@ func (h *FileHandlers) genSha1(msg worker.IMsg) error {
     buf := make([]byte, 4096)
     _, err = io.CopyBuffer(hasher, f, buf)
     if err != nil {
-        return err
+        return fmt.Errorf("faile to copy buffer: %w", err)
     }
     sha1Sign := fmt.Sprintf("%x", hasher.Sum(nil))
@@ -47,5 +47,6 @@ func (h *FileHandlers) genSha1(msg worker.IMsg) error {
     if err != nil {
         return fmt.Errorf("fail to set sha1: %s", err)
     }
     return nil
 }
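The copy error is now wrapped with %w, which keeps the underlying error reachable through errors.Is and errors.As; the untouched "fail to set sha1" message still uses %s and flattens the chain to text. A small standalone sketch of the difference, using io.ErrUnexpectedEOF purely as an example sentinel:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("fail to copy buffer: %w", io.ErrUnexpectedEOF)
	stringified := fmt.Errorf("fail to copy buffer: %s", io.ErrUnexpectedEOF)

	// %w preserves the error chain, %s only keeps the message text.
	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF))     // true
	fmt.Println(errors.Is(stringified, io.ErrUnexpectedEOF)) // false
}
```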

View file

@@ -8,7 +8,6 @@ import (
     "io"
     "net/http"
     "os"
-    "path"
     "path/filepath"
     "strconv"
     "strings"
@@ -128,26 +127,32 @@ func (h *FileHandlers) Create(c *gin.Context) {
         c.JSON(q.ErrResp(c, 500, err))
         return
     }
+    userID := c.MustGet(q.UserIDParam).(string)
+    fsFilePath, err := h.getFSFilePath(userID, req.Path)
+    if err != nil {
+        if errors.Is(err, os.ErrExist) {
+            c.JSON(q.ErrResp(c, 400, err))
+        } else {
+            c.JSON(q.ErrResp(c, 500, err))
+        }
+        return
+    }
     role := c.MustGet(q.RoleParam).(string)
     userName := c.MustGet(q.UserParam).(string)
-    if !h.canAccess(userName, role, "create", req.Path) {
+    if !h.canAccess(userName, role, "create", fsFilePath) {
         c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
         return
     }
-    userID := c.MustGet(q.UserIDParam).(string)
     userIDInt, err := strconv.ParseUint(userID, 10, 64)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
     }
-    fsFilePath, err := h.getFSFilePath(userID, req.Path)
-    if err != nil {
-        c.JSON(q.ErrResp(c, 500, err))
-        return
-    }
-    tmpFilePath := q.UploadPath(userName, req.Path)
+    tmpFilePath := q.UploadPath(userName, fsFilePath)
     if req.FileSize == 0 {
         // TODO: limit the number of files with 0 byte
@@ -168,7 +173,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
         return
     }
-    err = h.deps.FS().MkdirAll(filepath.Dir(req.Path))
+    err = h.deps.FS().MkdirAll(filepath.Dir(fsFilePath))
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
@@ -217,6 +222,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
         } else {
             c.JSON(q.ErrResp(c, 500, err))
         }
+        return
     }
     locker := h.NewAutoLocker(c, lockName(tmpFilePath))
@@ -243,6 +249,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
 func (h *FileHandlers) Delete(c *gin.Context) {
     filePath := c.Query(FilePathQuery)
+    filePath = filepath.Clean(filePath)
     if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
         return
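From here on, every path taken from a query parameter or request body is normalized with filepath.Clean before permission checks and filesystem calls, which collapses "." and ".." segments, duplicate separators, and trailing slashes. A quick standalone illustration of what Clean does to typical request paths (the sample values are made up):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	for _, p := range []string{
		"qs/files/a/../b", // parent segments are resolved
		"qs//files//c/",   // duplicate and trailing separators are dropped
		"./qs/files/d",    // leading "./" is removed
		"",                // empty input becomes "."
	} {
		fmt.Printf("%q -> %q\n", p, filepath.Clean(p))
	}
}
```

One side effect worth noting: Clean("") returns ".", so an emptiness check placed after the Clean call no longer sees an empty string.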
@@ -300,6 +307,7 @@ type MetadataResp struct {
 func (h *FileHandlers) Metadata(c *gin.Context) {
     filePath := c.Query(FilePathQuery)
+    filePath = filepath.Clean(filePath)
     if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
         return
@@ -339,14 +347,16 @@ func (h *FileHandlers) Mkdir(c *gin.Context) {
         c.JSON(q.ErrResp(c, 400, err))
         return
     }
     role := c.MustGet(q.RoleParam).(string)
     userName := c.MustGet(q.UserParam).(string)
-    if !h.canAccess(userName, role, "mkdir", req.Path) {
+    dirPath := filepath.Clean(req.Path)
+    if !h.canAccess(userName, role, "mkdir", dirPath) {
         c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
         return
     }
-    err := h.deps.FS().MkdirAll(req.Path)
+    err := h.deps.FS().MkdirAll(dirPath)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
@@ -369,7 +379,10 @@ func (h *FileHandlers) Move(c *gin.Context) {
     role := c.MustGet(q.RoleParam).(string)
     userID := c.MustGet(q.UserIDParam).(string)
     userName := c.MustGet(q.UserParam).(string)
-    if !h.canAccess(userName, role, "move", req.OldPath) || !h.canAccess(userName, role, "move", req.NewPath) {
+    oldPath := filepath.Clean(req.OldPath)
+    newPath := filepath.Clean(req.NewPath)
+    if !h.canAccess(userName, role, "move", oldPath) || !h.canAccess(userName, role, "move", newPath) {
         c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
         return
     }
@@ -379,12 +392,12 @@ func (h *FileHandlers) Move(c *gin.Context) {
         return
     }
-    itemInfo, err := h.deps.FS().Stat(req.OldPath)
+    itemInfo, err := h.deps.FS().Stat(oldPath)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
     }
-    _, err = h.deps.FS().Stat(req.NewPath)
+    _, err = h.deps.FS().Stat(newPath)
     if err != nil && !os.IsNotExist(err) {
         c.JSON(q.ErrResp(c, 500, err))
         return
@@ -394,13 +407,13 @@ func (h *FileHandlers) Move(c *gin.Context) {
         return
     }
-    err = h.deps.BoltStore().MoveInfos(userIDInt, req.OldPath, req.NewPath, itemInfo.IsDir())
+    err = h.deps.BoltStore().MoveInfos(userIDInt, oldPath, newPath, itemInfo.IsDir())
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
     }
-    err = h.deps.FS().Rename(req.OldPath, req.NewPath)
+    err = h.deps.FS().Rename(oldPath, newPath)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
@@ -424,7 +437,8 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
     role := c.MustGet(q.RoleParam).(string)
     userID := c.MustGet(q.UserIDParam).(string)
     userName := c.MustGet(q.UserParam).(string)
-    if !h.canAccess(userName, role, "upload.chunk", req.Path) {
+    filePath := filepath.Clean(req.Path)
+    if !h.canAccess(userName, role, "upload.chunk", filePath) {
         c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
         return
     }
@@ -446,12 +460,12 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
     var txErr error
     var statusCode int
-    tmpFilePath := q.UploadPath(userName, req.Path)
+    tmpFilePath := q.UploadPath(userName, filePath)
     locker := h.NewAutoLocker(c, lockName(tmpFilePath))
     locker.Exec(func() {
         var err error
-        _, fileSize, uploaded, err := h.deps.FileInfos().GetUploadInfo(userID, tmpFilePath)
+        fsFilePath, fileSize, uploaded, err := h.deps.FileInfos().GetUploadInfo(userID, tmpFilePath)
         if err != nil {
             txErr, statusCode = err, 500
             return
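GetUploadInfo now also returns the destination path (fsFilePath) that was resolved and stored when the upload was created, so UploadChunk no longer recomputes it; recomputing would now fail with os.ErrExist once the file is partially in place. A rough sketch of that idea under assumed names (uploadRecord and the sample paths are hypothetical, not the real schema):

```go
package main

import "fmt"

// uploadRecord mirrors the idea behind GetUploadInfo's extra return value:
// the destination resolved at Create time travels with the upload state,
// so the chunk handler never has to re-resolve the path.
type uploadRecord struct {
	realFilePath string // final location, fixed when the upload was created
	fileSize     int64
	uploaded     int64
}

func main() {
	tmpPath := "demo/uploadings/3a5b9c0f" // hypothetical temp key
	records := map[string]uploadRecord{
		tmpPath: {realFilePath: "demo/files/report.pdf", fileSize: 1024, uploaded: 1024},
	}

	rec := records[tmpPath]
	if rec.uploaded == rec.fileSize {
		fmt.Println("move", tmpPath, "->", rec.realFilePath)
	}
}
```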
@@ -480,12 +494,6 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
         // move the file from uploading dir to uploaded dir
         if uploaded+int64(wrote) == fileSize {
-            fsFilePath, err := h.getFSFilePath(userID, req.Path)
-            if err != nil {
-                txErr, statusCode = err, 500
-                return
-            }
             err = h.deps.BoltStore().MoveUploadingInfos(userIDInt, tmpFilePath, fsFilePath)
             if err != nil {
                 txErr, statusCode = err, 500
@@ -494,7 +502,7 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
             err = h.deps.FS().Rename(tmpFilePath, fsFilePath)
             if err != nil {
-                txErr, statusCode = fmt.Errorf("%s error: %w", req.Path, err), 500
+                txErr, statusCode = fmt.Errorf("%s error: %w", fsFilePath, err), 500
                 return
             }
@@ -524,7 +532,7 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
             return
         }
         c.JSON(200, &UploadStatusResp{
-            Path:     req.Path,
+            Path:     fsFilePath,
             IsDir:    false,
             FileSize: fileSize,
             Uploaded: uploaded + int64(wrote),
@@ -533,6 +541,8 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
 }
 func (h *FileHandlers) getFSFilePath(userID, fsFilePath string) (string, error) {
+    fsFilePath = filepath.Clean(fsFilePath)
     _, err := h.deps.FS().Stat(fsFilePath)
     if err != nil {
         if os.IsNotExist(err) {
@@ -540,28 +550,32 @@ func (h *FileHandlers) getFSFilePath(userID, fsFilePath string)
         }
         return "", err
     }
+    return "", os.ErrExist
+    // temporarily disable file auto renaming
+    // the new name should be returned to the client
+    // but curently files status is tracked by the original file name between worker and upload manager
     // this file exists
-    maxDetect := 1024
-    fsFilePath = filepath.Clean(fsFilePath)
-    for i := 1; i < maxDetect; i++ {
-        dir := path.Dir(fsFilePath)
-        nameAndExt := path.Base(fsFilePath)
-        ext := path.Ext(nameAndExt)
-        fileName := nameAndExt[:len(nameAndExt)-len(ext)]
-        detectPath := path.Join(dir, fmt.Sprintf("%s_%d%s", fileName, i, ext))
-        _, err := h.deps.FS().Stat(detectPath)
-        if err != nil {
-            if os.IsNotExist(err) {
-                return detectPath, nil
-            } else {
-                return "", err
-            }
-        }
-    }
-    return "", fmt.Errorf("found more than %d duplicated files", maxDetect)
+    // maxDetect := 1024
+    // for i := 1; i < maxDetect; i++ {
+    //     dir := path.Dir(fsFilePath)
+    //     nameAndExt := path.Base(fsFilePath)
+    //     ext := path.Ext(nameAndExt)
+    //     fileName := nameAndExt[:len(nameAndExt)-len(ext)]
+    //     detectPath := path.Join(dir, fmt.Sprintf("%s_%d%s", fileName, i, ext))
+    //     _, err := h.deps.FS().Stat(detectPath)
+    //     if err != nil {
+    //         if os.IsNotExist(err) {
+    //             return detectPath, nil
+    //         } else {
+    //             return "", err
+    //         }
+    //     }
+    // }
+    // return "", fmt.Errorf("found more than %d duplicated files", maxDetect)
 }
 type UploadStatusResp struct {
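With the renaming loop commented out, getFSFilePath fails fast with os.ErrExist when the target path is already taken instead of probing name_1, name_2, and so on; the Create handler above maps that sentinel to HTTP 400. A self-contained sketch of the same fail-fast check against an in-memory filesystem (resolveTarget and the sample paths are assumptions for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"testing/fstest"
)

// resolveTarget mirrors the new fail-fast behaviour: if the destination
// already exists, report os.ErrExist instead of inventing a "name_1" variant.
func resolveTarget(fsys fs.FS, p string) (string, error) {
	if _, err := fs.Stat(fsys, p); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return p, nil // free to create
		}
		return "", err
	}
	return "", os.ErrExist
}

func main() {
	fsys := fstest.MapFS{"files/a.txt": {Data: []byte("x")}}

	for _, p := range []string{"files/a.txt", "files/b.txt"} {
		target, err := resolveTarget(fsys, p)
		switch {
		case errors.Is(err, os.ErrExist):
			fmt.Println(p, "-> 400: already exists") // what Create now returns
		case err != nil:
			fmt.Println(p, "-> 500:", err)
		default:
			fmt.Println(p, "-> create at", target)
		}
	}
}
```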
@@ -573,6 +587,7 @@ type UploadStatusResp struct {
 func (h *FileHandlers) UploadStatus(c *gin.Context) {
     filePath := c.Query(FilePathQuery)
+    filePath = filepath.Clean(filePath)
     if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
     }
@@ -611,6 +626,7 @@ func (h *FileHandlers) Download(c *gin.Context) {
     rangeVal := c.GetHeader(rangeHeader)
     ifRangeVal := c.GetHeader(ifRangeHeader)
     filePath := c.Query(FilePathQuery)
+    filePath = filepath.Clean(filePath)
     if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
         return
@@ -771,6 +787,7 @@ func (h *FileHandlers) MergeFileInfos(dirPath string, infos []os.FileInfo) ([]*M
 func (h *FileHandlers) List(c *gin.Context) {
     dirPath := c.Query(ListDirQuery)
+    dirPath = filepath.Clean(dirPath)
     if dirPath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("incorrect path name")))
         return
@@ -854,6 +871,7 @@ func (h *FileHandlers) ListUploadings(c *gin.Context) {
 func (h *FileHandlers) DelUploading(c *gin.Context) {
     filePath := c.Query(FilePathQuery)
+    filePath = filepath.Clean(filePath)
     if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
         return
@@ -920,21 +938,22 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
         return
     }
+    sharingPath := filepath.Clean(req.SharingPath)
     // TODO: move canAccess to authedFS
     role := c.MustGet(q.RoleParam).(string)
     userName := c.MustGet(q.UserParam).(string)
     // op is empty, because users must be admin, or the path belongs to this user
-    if !h.canAccess(userName, role, "", req.SharingPath) {
+    if !h.canAccess(userName, role, "", sharingPath) {
         c.JSON(q.ErrResp(c, 403, errors.New("forbidden")))
         return
     }
-    if req.SharingPath == "" || req.SharingPath == "/" {
+    if sharingPath == "" || sharingPath == "/" {
         c.JSON(q.ErrResp(c, 403, errors.New("forbidden")))
         return
     }
-    info, err := h.deps.FS().Stat(req.SharingPath)
+    info, err := h.deps.FS().Stat(sharingPath)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
@@ -943,7 +962,7 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
         return
     }
-    err = h.deps.FileInfos().AddSharing(req.SharingPath)
+    err = h.deps.FileInfos().AddSharing(sharingPath)
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))
         return
@ -953,6 +972,7 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
func (h *FileHandlers) DelSharing(c *gin.Context) { func (h *FileHandlers) DelSharing(c *gin.Context) {
dirPath := c.Query(FilePathQuery) dirPath := c.Query(FilePathQuery)
dirPath = filepath.Clean(dirPath)
if dirPath == "" { if dirPath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path"))) c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return return
@@ -976,6 +996,7 @@ func (h *FileHandlers) DelSharing(c *gin.Context) {
 func (h *FileHandlers) IsSharing(c *gin.Context) {
     dirPath := c.Query(FilePathQuery)
+    dirPath = filepath.Clean(dirPath)
     if dirPath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
         return
@@ -1038,20 +1059,21 @@ func (h *FileHandlers) GenerateHash(c *gin.Context) {
         return
     }
-    if req.FilePath == "" {
+    filePath := filepath.Clean(req.FilePath)
+    if filePath == "" {
         c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
         return
     }
     role := c.MustGet(q.RoleParam).(string)
     userName := c.MustGet(q.UserParam).(string)
-    if !h.canAccess(userName, role, "hash.gen", req.FilePath) {
+    if !h.canAccess(userName, role, "hash.gen", filePath) {
         c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
         return
     }
     msg, err := json.Marshal(Sha1Params{
-        FilePath: req.FilePath,
+        FilePath: filePath,
     })
     if err != nil {
         c.JSON(q.ErrResp(c, 500, err))

View file

@@ -133,10 +133,12 @@ func ErrResp(c *gin.Context, code int, err error) (int, interface{}) {
 }
 func FsRootPath(userName, relFilePath string) string {
+    relFilePath = filepath.Clean(relFilePath)
     return filepath.Join(userName, FsRootDir, relFilePath)
 }
 func UploadPath(userName, relFilePath string) string {
+    relFilePath = filepath.Clean(relFilePath)
     return filepath.Join(UploadFolder(userName), fmt.Sprintf("%x", sha1.Sum([]byte(relFilePath))))
 }
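UploadPath derives the temp file name from a SHA-1 of the relative path, so without normalization spellings like "files/a/b.txt" and "files//a/./b.txt" would map to different temp files for the same target. Cleaning first collapses them to one key. A standalone sketch of that effect; the "uploadings" folder name here is a placeholder, not the project's real constant:

```go
package main

import (
	"crypto/sha1"
	"fmt"
	"path/filepath"
)

// uploadPath mimics the cleaned variant: equivalent path spellings
// produce the same SHA-1 key and therefore the same temp file.
func uploadPath(userName, relFilePath string) string {
	relFilePath = filepath.Clean(relFilePath)
	return filepath.Join(userName, "uploadings", fmt.Sprintf("%x", sha1.Sum([]byte(relFilePath))))
}

func main() {
	fmt.Println(uploadPath("demo", "files/a/b.txt"))
	fmt.Println(uploadPath("demo", "files//a/./b.txt")) // same key after Clean
}
```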

View file

@@ -48,7 +48,7 @@ func LoadCfg(args *Args) (*gocfg.Cfg, error) {
         if !os.IsNotExist(err) {
             return nil, err
         } else {
-            fmt.Printf("warning: Database does not exist in (%s), skipped", dbPath)
+            fmt.Printf("warning: Database does not exist in (%s), skipped\n", dbPath)
         }
     }
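fmt.Printf does not append a newline, so the old warning would run straight into whatever is printed next; the added \n keeps it on its own line. A trivial illustration with a made-up dbPath:

```go
package main

import "fmt"

func main() {
	dbPath := "/tmp/quickshare.db" // example value only

	fmt.Printf("warning: Database does not exist in (%s), skipped", dbPath)
	fmt.Print("next output") // without \n this glues onto the warning
	fmt.Println()
	fmt.Printf("warning: Database does not exist in (%s), skipped\n", dbPath) // fixed
	fmt.Print("next output\n")
}
```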

View file

@@ -126,13 +126,8 @@ func TestFileHandlers(t *testing.T) {
         "qs/files/dupdir/dup_file1":     "12345678",
         "qs/files/dupdir/dup_file2.ext": "12345678",
     }
-    renames := map[string]string{
-        "qs/files/dupdir/dup_file1":     "qs/files/dupdir/dup_file1_1",
-        "qs/files/dupdir/dup_file2.ext": "qs/files/dupdir/dup_file2_1.ext",
-    }
     for filePath, content := range files {
-        for i := 0; i < 2; i++ {
         assertUploadOK(t, filePath, content, addr, token)
         // file operation(deleting tmp file) may be async
@@ -141,31 +136,26 @@ func TestFileHandlers(t *testing.T) {
         // TODO: use fs.Stat() to avoid flaky testing...
         time.Sleep(1000)
-        err = fs.Close()
-        if err != nil {
-            t.Fatal(err)
-        }
-        if i == 0 {
-            assertDownloadOK(t, filePath, content, addr, token)
-        } else if i == 1 {
-            renamedFilePath, ok := renames[filePath]
-            if !ok {
-                t.Fatal("new name not found")
-            }
-            assertDownloadOK(t, renamedFilePath, content, addr, token)
-        }
-    }
+        assertDownloadOK(t, filePath, content, addr, token)
     }
+    for filePath, content := range files {
+        res, _, _ := cl.Create(filePath, int64(len([]byte(content))))
+        if res.StatusCode != 400 {
+            t.Fatal(res.StatusCode)
+        }
+    }
 })
 t.Run("test: Upload-Delete-Upload: fd are closed correctly", func(t *testing.T) {
     files := map[string]string{
-        "qs/files/close_Fd/dup_file1": "12345678",
+        "qs/files/close_fd/dup_file1": "12345678",
     }
     for i := 0; i < 2; i++ {
         for filePath, content := range files {
+            time.Sleep(1000)
             assertUploadOK(t, filePath, content, addr, token)
             // file operation(deleting tmp file) may be async
@@ -185,9 +175,6 @@ func TestFileHandlers(t *testing.T) {
             } else if res.StatusCode != 200 {
                 t.Fatal(res.StatusCode)
             }
-            // tmpFile fd is closed, so follow must succeed
-            assertUploadOK(t, filePath, content, addr, token)
         }
     }
 })
@@ -493,7 +480,7 @@ func TestFileHandlers(t *testing.T) {
     res, _, errs = userFilesCl.IsSharing(fmt.Sprintf("%s/", dirPath))
     if len(errs) > 0 {
         t.Fatal(errs)
-    } else if res.StatusCode != 404 {
+    } else if res.StatusCode != 200 { // dirPath is cleaned
         t.Fatal(res.StatusCode)
     }
 }
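The expected status flips from 404 to 200 because IsSharing now cleans the query path: a value with a trailing slash normalizes to the same string that was stored when sharing was added, so the lookup succeeds. A tiny illustration (the directory value is an arbitrary example):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	shared := "qs/files/shared_dir"                 // path stored when sharing was added
	queried := shared + "/"                         // client queries with a trailing slash
	fmt.Println(filepath.Clean(queried) == shared)  // true: cleaned query hits the shared entry
}
```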

Binary file not shown.