fix(files): disable auto renaming and clean path from request

This commit is contained in:
hexxa 2022-04-03 15:50:18 +08:00 committed by Hexxa
parent 4de972cb11
commit 0e77cfe335
8 changed files with 108 additions and 87 deletions

View file

@ -79,7 +79,7 @@ func (bs *BoltStore) getUploadInfo(tx *bolt.Tx, userID uint64, itemPath string)
uploadInfoBucket := tx.Bucket([]byte(userUploadNS))
if uploadInfoBucket == nil {
return nil, bolt.ErrBucketNotFound
return nil, db.ErrBucketNotFound
}
@ -169,6 +169,14 @@ func (bs *BoltStore) setFileInfo(tx *bolt.Tx, userID uint64, itemPath string, fi
func (bs *BoltStore) AddUploadInfos(userID uint64, tmpPath, filePath string, info *db.FileInfo) error {
return bs.boltdb.Update(func(tx *bolt.Tx) error {
var err error
_, err = bs.getUploadInfo(tx, userID, tmpPath)
if err == nil {
return db.ErrKeyExisting
} else if !errors.Is(err, db.ErrBucketNotFound) && !errors.Is(err, db.ErrKeyNotFound) {
return err
}
// check space quota
userInfo, err := bs.getUserInfo(tx, userID)
if err != nil {

View file

@ -31,6 +31,7 @@ var (
ErrInvalidPreferences = errors.New("invalid preferences")
ErrBucketNotFound = errors.New("bucket not found")
ErrKeyNotFound = errors.New("key not found")
ErrKeyExisting = errors.New("key is existing")
ErrCreateExisting = errors.New("create upload info which already exists")
ErrQuota = errors.New("quota limit reached")

View file

@ -39,7 +39,7 @@ func (h *FileHandlers) genSha1(msg worker.IMsg) error {
buf := make([]byte, 4096)
_, err = io.CopyBuffer(hasher, f, buf)
if err != nil {
return err
return fmt.Errorf("faile to copy buffer: %w", err)
}
sha1Sign := fmt.Sprintf("%x", hasher.Sum(nil))
@ -47,5 +47,6 @@ func (h *FileHandlers) genSha1(msg worker.IMsg) error {
if err != nil {
return fmt.Errorf("fail to set sha1: %s", err)
}
return nil
}

View file

@ -8,7 +8,6 @@ import (
"io"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
@ -128,26 +127,32 @@ func (h *FileHandlers) Create(c *gin.Context) {
c.JSON(q.ErrResp(c, 500, err))
return
}
userID := c.MustGet(q.UserIDParam).(string)
fsFilePath, err := h.getFSFilePath(userID, req.Path)
if err != nil {
if errors.Is(err, os.ErrExist) {
c.JSON(q.ErrResp(c, 400, err))
} else {
c.JSON(q.ErrResp(c, 500, err))
}
return
}
role := c.MustGet(q.RoleParam).(string)
userName := c.MustGet(q.UserParam).(string)
if !h.canAccess(userName, role, "create", req.Path) {
if !h.canAccess(userName, role, "create", fsFilePath) {
c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
return
}
userID := c.MustGet(q.UserIDParam).(string)
userIDInt, err := strconv.ParseUint(userID, 10, 64)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
fsFilePath, err := h.getFSFilePath(userID, req.Path)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
tmpFilePath := q.UploadPath(userName, req.Path)
tmpFilePath := q.UploadPath(userName, fsFilePath)
if req.FileSize == 0 {
// TODO: limit the number of files with 0 byte
@ -168,7 +173,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
return
}
err = h.deps.FS().MkdirAll(filepath.Dir(req.Path))
err = h.deps.FS().MkdirAll(filepath.Dir(fsFilePath))
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
@ -217,6 +222,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
} else {
c.JSON(q.ErrResp(c, 500, err))
}
return
}
locker := h.NewAutoLocker(c, lockName(tmpFilePath))
@ -243,6 +249,7 @@ func (h *FileHandlers) Create(c *gin.Context) {
func (h *FileHandlers) Delete(c *gin.Context) {
filePath := c.Query(FilePathQuery)
filePath = filepath.Clean(filePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
@ -300,6 +307,7 @@ type MetadataResp struct {
func (h *FileHandlers) Metadata(c *gin.Context) {
filePath := c.Query(FilePathQuery)
filePath = filepath.Clean(filePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
@ -339,14 +347,16 @@ func (h *FileHandlers) Mkdir(c *gin.Context) {
c.JSON(q.ErrResp(c, 400, err))
return
}
role := c.MustGet(q.RoleParam).(string)
userName := c.MustGet(q.UserParam).(string)
if !h.canAccess(userName, role, "mkdir", req.Path) {
dirPath := filepath.Clean(req.Path)
if !h.canAccess(userName, role, "mkdir", dirPath) {
c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
return
}
err := h.deps.FS().MkdirAll(req.Path)
err := h.deps.FS().MkdirAll(dirPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
@ -369,7 +379,10 @@ func (h *FileHandlers) Move(c *gin.Context) {
role := c.MustGet(q.RoleParam).(string)
userID := c.MustGet(q.UserIDParam).(string)
userName := c.MustGet(q.UserParam).(string)
if !h.canAccess(userName, role, "move", req.OldPath) || !h.canAccess(userName, role, "move", req.NewPath) {
oldPath := filepath.Clean(req.OldPath)
newPath := filepath.Clean(req.NewPath)
if !h.canAccess(userName, role, "move", oldPath) || !h.canAccess(userName, role, "move", newPath) {
c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
return
}
@ -379,12 +392,12 @@ func (h *FileHandlers) Move(c *gin.Context) {
return
}
itemInfo, err := h.deps.FS().Stat(req.OldPath)
itemInfo, err := h.deps.FS().Stat(oldPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
_, err = h.deps.FS().Stat(req.NewPath)
_, err = h.deps.FS().Stat(newPath)
if err != nil && !os.IsNotExist(err) {
c.JSON(q.ErrResp(c, 500, err))
return
@ -394,13 +407,13 @@ func (h *FileHandlers) Move(c *gin.Context) {
return
}
err = h.deps.BoltStore().MoveInfos(userIDInt, req.OldPath, req.NewPath, itemInfo.IsDir())
err = h.deps.BoltStore().MoveInfos(userIDInt, oldPath, newPath, itemInfo.IsDir())
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
err = h.deps.FS().Rename(req.OldPath, req.NewPath)
err = h.deps.FS().Rename(oldPath, newPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
@ -424,7 +437,8 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
role := c.MustGet(q.RoleParam).(string)
userID := c.MustGet(q.UserIDParam).(string)
userName := c.MustGet(q.UserParam).(string)
if !h.canAccess(userName, role, "upload.chunk", req.Path) {
filePath := filepath.Clean(req.Path)
if !h.canAccess(userName, role, "upload.chunk", filePath) {
c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
return
}
@ -446,12 +460,12 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
var txErr error
var statusCode int
tmpFilePath := q.UploadPath(userName, req.Path)
tmpFilePath := q.UploadPath(userName, filePath)
locker := h.NewAutoLocker(c, lockName(tmpFilePath))
locker.Exec(func() {
var err error
_, fileSize, uploaded, err := h.deps.FileInfos().GetUploadInfo(userID, tmpFilePath)
fsFilePath, fileSize, uploaded, err := h.deps.FileInfos().GetUploadInfo(userID, tmpFilePath)
if err != nil {
txErr, statusCode = err, 500
return
@ -480,12 +494,6 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
// move the file from uploading dir to uploaded dir
if uploaded+int64(wrote) == fileSize {
fsFilePath, err := h.getFSFilePath(userID, req.Path)
if err != nil {
txErr, statusCode = err, 500
return
}
err = h.deps.BoltStore().MoveUploadingInfos(userIDInt, tmpFilePath, fsFilePath)
if err != nil {
txErr, statusCode = err, 500
@ -494,7 +502,7 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
err = h.deps.FS().Rename(tmpFilePath, fsFilePath)
if err != nil {
txErr, statusCode = fmt.Errorf("%s error: %w", req.Path, err), 500
txErr, statusCode = fmt.Errorf("%s error: %w", fsFilePath, err), 500
return
}
@ -524,7 +532,7 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
return
}
c.JSON(200, &UploadStatusResp{
Path: req.Path,
Path: fsFilePath,
IsDir: false,
FileSize: fileSize,
Uploaded: uploaded + int64(wrote),
@ -533,6 +541,8 @@ func (h *FileHandlers) UploadChunk(c *gin.Context) {
}
func (h *FileHandlers) getFSFilePath(userID, fsFilePath string) (string, error) {
fsFilePath = filepath.Clean(fsFilePath)
_, err := h.deps.FS().Stat(fsFilePath)
if err != nil {
if os.IsNotExist(err) {
@ -540,28 +550,32 @@ func (h *FileHandlers) getFSFilePath(userID, fsFilePath string) (string, error)
}
return "", err
}
return "", os.ErrExist
// temporarily disable file auto renaming
// the new name should be returned to the client
// but currently file status is tracked by the original file name between worker and upload manager
// this file exists
maxDetect := 1024
fsFilePath = filepath.Clean(fsFilePath)
for i := 1; i < maxDetect; i++ {
dir := path.Dir(fsFilePath)
nameAndExt := path.Base(fsFilePath)
ext := path.Ext(nameAndExt)
fileName := nameAndExt[:len(nameAndExt)-len(ext)]
// maxDetect := 1024
// for i := 1; i < maxDetect; i++ {
// dir := path.Dir(fsFilePath)
// nameAndExt := path.Base(fsFilePath)
// ext := path.Ext(nameAndExt)
// fileName := nameAndExt[:len(nameAndExt)-len(ext)]
detectPath := path.Join(dir, fmt.Sprintf("%s_%d%s", fileName, i, ext))
_, err := h.deps.FS().Stat(detectPath)
if err != nil {
if os.IsNotExist(err) {
return detectPath, nil
} else {
return "", err
}
}
}
// detectPath := path.Join(dir, fmt.Sprintf("%s_%d%s", fileName, i, ext))
// _, err := h.deps.FS().Stat(detectPath)
// if err != nil {
// if os.IsNotExist(err) {
// return detectPath, nil
// } else {
// return "", err
// }
// }
// }
return "", fmt.Errorf("found more than %d duplicated files", maxDetect)
// return "", fmt.Errorf("found more than %d duplicated files", maxDetect)
}
type UploadStatusResp struct {
@ -573,6 +587,7 @@ type UploadStatusResp struct {
func (h *FileHandlers) UploadStatus(c *gin.Context) {
filePath := c.Query(FilePathQuery)
filePath = filepath.Clean(filePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
}
@ -611,6 +626,7 @@ func (h *FileHandlers) Download(c *gin.Context) {
rangeVal := c.GetHeader(rangeHeader)
ifRangeVal := c.GetHeader(ifRangeHeader)
filePath := c.Query(FilePathQuery)
filePath = filepath.Clean(filePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
return
@ -771,6 +787,7 @@ func (h *FileHandlers) MergeFileInfos(dirPath string, infos []os.FileInfo) ([]*M
func (h *FileHandlers) List(c *gin.Context) {
dirPath := c.Query(ListDirQuery)
dirPath = filepath.Clean(dirPath)
if dirPath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("incorrect path name")))
return
@ -854,6 +871,7 @@ func (h *FileHandlers) ListUploadings(c *gin.Context) {
func (h *FileHandlers) DelUploading(c *gin.Context) {
filePath := c.Query(FilePathQuery)
filePath = filepath.Clean(filePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
@ -920,21 +938,22 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
return
}
sharingPath := filepath.Clean(req.SharingPath)
// TODO: move canAccess to authedFS
role := c.MustGet(q.RoleParam).(string)
userName := c.MustGet(q.UserParam).(string)
// op is empty, because users must be admin, or the path belongs to this user
if !h.canAccess(userName, role, "", req.SharingPath) {
if !h.canAccess(userName, role, "", sharingPath) {
c.JSON(q.ErrResp(c, 403, errors.New("forbidden")))
return
}
if req.SharingPath == "" || req.SharingPath == "/" {
if sharingPath == "" || sharingPath == "/" {
c.JSON(q.ErrResp(c, 403, errors.New("forbidden")))
return
}
info, err := h.deps.FS().Stat(req.SharingPath)
info, err := h.deps.FS().Stat(sharingPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
@ -943,7 +962,7 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
return
}
err = h.deps.FileInfos().AddSharing(req.SharingPath)
err = h.deps.FileInfos().AddSharing(sharingPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
@ -953,6 +972,7 @@ func (h *FileHandlers) AddSharing(c *gin.Context) {
func (h *FileHandlers) DelSharing(c *gin.Context) {
dirPath := c.Query(FilePathQuery)
dirPath = filepath.Clean(dirPath)
if dirPath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
@ -976,6 +996,7 @@ func (h *FileHandlers) DelSharing(c *gin.Context) {
func (h *FileHandlers) IsSharing(c *gin.Context) {
dirPath := c.Query(FilePathQuery)
dirPath = filepath.Clean(dirPath)
if dirPath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
@ -1038,20 +1059,21 @@ func (h *FileHandlers) GenerateHash(c *gin.Context) {
return
}
if req.FilePath == "" {
filePath := filepath.Clean(req.FilePath)
if filePath == "" {
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
return
}
role := c.MustGet(q.RoleParam).(string)
userName := c.MustGet(q.UserParam).(string)
if !h.canAccess(userName, role, "hash.gen", req.FilePath) {
if !h.canAccess(userName, role, "hash.gen", filePath) {
c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
return
}
msg, err := json.Marshal(Sha1Params{
FilePath: req.FilePath,
FilePath: filePath,
})
if err != nil {
c.JSON(q.ErrResp(c, 500, err))

View file

@ -133,10 +133,12 @@ func ErrResp(c *gin.Context, code int, err error) (int, interface{}) {
}
func FsRootPath(userName, relFilePath string) string {
relFilePath = filepath.Clean(relFilePath)
return filepath.Join(userName, FsRootDir, relFilePath)
}
func UploadPath(userName, relFilePath string) string {
relFilePath = filepath.Clean(relFilePath)
return filepath.Join(UploadFolder(userName), fmt.Sprintf("%x", sha1.Sum([]byte(relFilePath))))
}

View file

@ -48,7 +48,7 @@ func LoadCfg(args *Args) (*gocfg.Cfg, error) {
if !os.IsNotExist(err) {
return nil, err
} else {
fmt.Printf("warning: Database does not exist in (%s), skipped", dbPath)
fmt.Printf("warning: Database does not exist in (%s), skipped\n", dbPath)
}
}

View file

@ -126,46 +126,36 @@ func TestFileHandlers(t *testing.T) {
"qs/files/dupdir/dup_file1": "12345678",
"qs/files/dupdir/dup_file2.ext": "12345678",
}
renames := map[string]string{
"qs/files/dupdir/dup_file1": "qs/files/dupdir/dup_file1_1",
"qs/files/dupdir/dup_file2.ext": "qs/files/dupdir/dup_file2_1.ext",
for filePath, content := range files {
assertUploadOK(t, filePath, content, addr, token)
// file operation(deleting tmp file) may be async
// so creating tmp file in the 2nd time may conflict with the first time if it is not deleted yet
// checking file until it is deleted
// TODO: use fs.Stat() to avoid flaky testing...
time.Sleep(1000)
assertDownloadOK(t, filePath, content, addr, token)
}
for filePath, content := range files {
for i := 0; i < 2; i++ {
assertUploadOK(t, filePath, content, addr, token)
// file operation(deleting tmp file) may be async
// so creating tmp file in the 2nd time may conflict with the first time if it is not deleted yet
// checking file until it is deleted
// TODO: use fs.Stat() to avoid flaky testing...
time.Sleep(1000)
err = fs.Close()
if err != nil {
t.Fatal(err)
}
if i == 0 {
assertDownloadOK(t, filePath, content, addr, token)
} else if i == 1 {
renamedFilePath, ok := renames[filePath]
if !ok {
t.Fatal("new name not found")
}
assertDownloadOK(t, renamedFilePath, content, addr, token)
}
res, _, _ := cl.Create(filePath, int64(len([]byte(content))))
if res.StatusCode != 400 {
t.Fatal(res.StatusCode)
}
}
})
t.Run("test: Upload-Delete-Upload: fd are closed correctly", func(t *testing.T) {
files := map[string]string{
"qs/files/close_Fd/dup_file1": "12345678",
"qs/files/close_fd/dup_file1": "12345678",
}
for i := 0; i < 2; i++ {
for filePath, content := range files {
time.Sleep(1000)
assertUploadOK(t, filePath, content, addr, token)
// file operation(deleting tmp file) may be async
@ -185,9 +175,6 @@ func TestFileHandlers(t *testing.T) {
} else if res.StatusCode != 200 {
t.Fatal(res.StatusCode)
}
// tmpFile fd is closed, so follow must succeed
assertUploadOK(t, filePath, content, addr, token)
}
}
})
@ -493,7 +480,7 @@ func TestFileHandlers(t *testing.T) {
res, _, errs = userFilesCl.IsSharing(fmt.Sprintf("%s/", dirPath))
if len(errs) > 0 {
t.Fatal(errs)
} else if res.StatusCode != 404 {
} else if res.StatusCode != 200 { // dirPath is cleaned
t.Fatal(res.StatusCode)
}
}

Binary file not shown.