quickshare/src/server/server_files_test.go
2022-09-25 12:14:55 +08:00

1052 lines
26 KiB
Go

package server
import (
"encoding/base64"
"fmt"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"time"
"github.com/ihexxa/quickshare/src/client"
"github.com/ihexxa/quickshare/src/db"
q "github.com/ihexxa/quickshare/src/handlers"
"github.com/ihexxa/quickshare/src/handlers/fileshdr"
)
// TestFileHandlers boots a real quickshare server on 127.0.0.1:8686 backed
// by a temporary data directory, logs in as the predefined admin ("qs"),
// and then runs a sequence of subtests against the files HTTP APIs.
// The embedded config also predefines a non-admin user ("demo") that the
// sharing and search subtests log in as separately.
func TestFileHandlers(t *testing.T) {
	addr := "http://127.0.0.1:8686"
	rootPath := "tmpTestData"
	// Server configuration as a raw JSON string: auth enabled, captcha
	// disabled, upload/download rate limits, and the "demo" user.
	config := `{
"users": {
"enableAuth": true,
"minUserNameLen": 2,
"minPwdLen": 4,
"captchaEnabled": false,
"uploadSpeedLimit": 409600,
"downloadSpeedLimit": 409600,
"spaceLimit": 1000,
"limiterCapacity": 1000,
"limiterCyc": 1000,
"predefinedUsers": [
{
"name": "demo",
"pwd": "Quicksh@re",
"role": "user"
}
]
},
"server": {
"debug": true,
"host": "127.0.0.1",
"initFileIndex": true
},
"fs": {
"root": "tmpTestData"
},
"db": {
"dbPath": "tmpTestData/quickshare"
}
}`
	adminName := "qs"
	adminPwd := "quicksh@re"
	// setUpEnv / startTestServer are project test helpers; presumably they
	// create the root dir with the admin account and run the server
	// in-process — see their definitions elsewhere in this package.
	setUpEnv(t, rootPath, adminName, adminPwd)
	defer os.RemoveAll(rootPath)
	srv := startTestServer(config)
	defer srv.Shutdown()
	// fs is the server's file-system layer; subtests use it directly to
	// Sync/Stat/Create files underneath the HTTP API.
	fs := srv.depsFS()
	if !isServerReady(addr) {
		t.Fatal("fail to start server")
	}
	// Log in as admin and capture the session token cookie; all admin
	// file operations below reuse this token.
	usersCl := client.NewUsersClient(addr)
	resp, _, errs := usersCl.Login(adminName, adminPwd)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	token := client.GetCookie(resp.Cookies(), q.TokenCookie)
	adminFilesClient := client.NewFilesClient(addr, token)
	// Shared error variable reused by several subtests below.
	var err error
	// TODO: remove all files under home folder before testing
	// or the count of files is incorrect
t.Run("ListHome", func(t *testing.T) {
	// Seed the home directory with two small files, then verify that
	// ListHome reports the right cwd and exactly these entries with
	// correct sizes and types.
	files := map[string]string{
		"qs/files/home_file1": "12345678",
		"qs/files/home_file2": "12345678",
	}
	for filePath, content := range files {
		assertUploadOK(t, filePath, content, addr, token)
		if err = fs.Sync(); err != nil {
			t.Fatal(err)
		}
	}

	resp, lhResp, errs := adminFilesClient.ListHome()
	switch {
	case len(errs) > 0:
		t.Fatal(errs)
	case resp.StatusCode != 200:
		t.Fatal(resp.StatusCode)
	case lhResp.Cwd != "qs/files":
		t.Fatalf("incorrect ListHome cwd %s", lhResp.Cwd)
	case len(lhResp.Metadatas) != len(files):
		// Dump what was actually listed to help diagnose the mismatch.
		for _, metadata := range lhResp.Metadatas {
			fmt.Printf("%v\n", metadata)
		}
		t.Fatalf("incorrect ListHome content %d", len(lhResp.Metadatas))
	}

	// Index the returned metadata by file name for the per-file checks.
	byName := map[string]*fileshdr.MetadataResp{}
	for _, metadata := range lhResp.Metadatas {
		byName[metadata.Name] = metadata
	}
	for _, name := range []string{"home_file1", "home_file2"} {
		want := int64(len(files["qs/files/"+name]))
		if byName[name].Size != want {
			t.Fatalf("incorrect file size %d", byName[name].Size)
		} else if byName[name].IsDir {
			t.Fatal("incorrect item type")
		}
	}
})
t.Run("test uploading files with duplicated names", func(t *testing.T) {
	// Upload each file, wait for the async tmp-file cleanup, confirm it
	// downloads back intact, then verify that re-creating an existing
	// file is rejected with 400.
	files := map[string]string{
		"qs/files/dupdir/dup_file1":     "12345678",
		"qs/files/dupdir/dup_file2.ext": "12345678",
	}
	for filePath, content := range files {
		assertUploadOK(t, filePath, content, addr, token)
		// file operation(deleting tmp file) may be async
		// so creating tmp file in the 2nd time may conflict with the first time if it is not deleted yet
		// checking file until it is deleted
		// FIX: time.Sleep takes nanoseconds — the previous
		// time.Sleep(1000) paused for only 1µs, defeating the wait.
		// TODO: use fs.Stat() to avoid flaky testing...
		time.Sleep(time.Second)
		assertDownloadOK(t, filePath, content, addr, token)
	}
	for filePath, content := range files {
		res, _, _ := adminFilesClient.Create(filePath, int64(len([]byte(content))))
		if res.StatusCode != 400 {
			t.Fatal(res.StatusCode)
		}
	}
})
t.Run("test: Upload-Delete-Upload: fd are closed correctly", func(t *testing.T) {
	// Upload and delete the same file twice: if a file descriptor is
	// leaked on the first round, the second round fails.
	files := map[string]string{
		"qs/files/close_fd/dup_file1": "12345678",
	}
	for i := 0; i < 2; i++ {
		for filePath, content := range files {
			// file operation(deleting tmp file) may be async
			// so creating tmp file in the 2nd time may conflict with the first time if it is not deleted yet
			// checking file until it is deleted
			// FIX: time.Sleep takes nanoseconds — time.Sleep(1000)
			// waited only 1µs, so these pauses never actually waited.
			// TODO: use fs.Stat() to avoid flaky testing...
			time.Sleep(time.Second)
			assertUploadOK(t, filePath, content, addr, token)
			time.Sleep(time.Second)
			// Close the fs layer so descriptors are released before the
			// delete request.
			err = fs.Close()
			if err != nil {
				t.Fatal(err)
			}
			res, _, errs := adminFilesClient.Delete(filePath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
		}
	}
})
// Exercise the full chunked-upload lifecycle for several sizes, including
// an empty file: Create the uploading entry, push random-sized chunks,
// poll UploadStatus mid-stream, verify Metadata, then Delete.
t.Run("test files APIs: Create-UploadChunk-UploadStatus-Metadata-Delete", func(t *testing.T) {
	for filePath, content := range map[string]string{
		"qs/files/path1/empty":       "",
		"qs/files/path1/f1.md":       "1111 1111 1111 1111",
		"qs/files/path1/path2/f2.md": "1010 1010 1111 0000 0010",
	} {
		fileSize := int64(len([]byte(content)))
		// create a file
		res, _, errs := adminFilesClient.Create(filePath, fileSize)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
		// check uploading file
		// empty file is not created under the uploading folder
		if fileSize != 0 {
			uploadFilePath := q.UploadPath(adminName, filePath)
			info, err := fs.Stat(uploadFilePath)
			if err != nil {
				t.Fatal(err)
			} else if info.Name() != filepath.Base(uploadFilePath) {
				t.Fatal(info.Name(), filepath.Base(uploadFilePath))
			}
		}
		// upload chunks of random size (1-3 bytes) until the whole
		// content has been sent
		i := 0
		contentBytes := []byte(content)
		for i < len(contentBytes) {
			right := i + rand.Intn(3) + 1
			if right > len(contentBytes) {
				right = len(contentBytes)
			}
			chunk := contentBytes[i:right]
			chunkBase64 := base64.StdEncoding.EncodeToString(chunk)
			res, _, errs = adminFilesClient.UploadChunk(filePath, chunkBase64, int64(i))
			i = right
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
			// while the upload is still partial, UploadStatus must report
			// exactly the number of bytes received so far
			if int64(i) != fileSize {
				_, statusResp, errs := adminFilesClient.UploadStatus(filePath)
				if len(errs) > 0 {
					t.Fatal(errs)
				} else if statusResp.Path != filePath ||
					statusResp.IsDir ||
					statusResp.FileSize != fileSize ||
					statusResp.Uploaded != int64(i) {
					t.Fatal("incorrect uploadinfo info", statusResp)
				}
			}
		}
		err = fs.Sync()
		if err != nil {
			t.Fatal(err)
		}
		// check uploaded file: it must now exist at its final path
		// fsFilePath := filepath.Join("0", filePath)
		info, err := fs.Stat(filePath)
		if err != nil {
			t.Fatal(err)
		} else if info.Name() != filepath.Base(filePath) {
			t.Fatal(info.Name(), filepath.Base(filePath))
		}
		// metadata must agree with what fs.Stat reports
		_, mRes, errs := adminFilesClient.Metadata(filePath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if mRes.Name != info.Name() ||
			mRes.IsDir != info.IsDir() ||
			mRes.Size != info.Size() {
			// TODO: modTime is not checked
			t.Fatal("incorrect uploaded info", mRes)
		}
		// delete file
		res, _, errs = adminFilesClient.Delete(filePath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}
})
t.Run("test dirs APIs: Mkdir-Create-UploadChunk-List", func(t *testing.T) {
	// Create two directory trees, upload files into each, then confirm
	// List reports every file with its exact size.
	testDirs := map[string]map[string]string{
		"qs/files/dir/path1": {
			"f1.md": "11111",
			"f2.md": "22222222222",
		},
		"qs/files/dir/path2/path2": {
			"f3.md": "3333333",
		},
	}
	for dirPath, files := range testDirs {
		res, _, errs := adminFilesClient.Mkdir(dirPath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
		for fileName, content := range files {
			assertUploadOK(t, filepath.Join(dirPath, fileName), content, addr, token)
		}
		if err = fs.Sync(); err != nil {
			t.Fatal(err)
		}

		_, lResp, errs := adminFilesClient.List(dirPath)
		if len(errs) > 0 {
			t.Fatal(errs)
		}
		for _, metadata := range lResp.Metadatas {
			content, ok := files[metadata.Name]
			if !ok {
				t.Fatalf("%s not found", metadata.Name)
			} else if int64(len(content)) != metadata.Size {
				t.Fatalf("size not match %d %d \n", len(content), metadata.Size)
			}
		}
	}
})
t.Run("test operation APIs: Mkdir-Create-UploadChunk-Move-List", func(t *testing.T) {
	// Upload files into src, move each one to dst, verify the dst
	// listing, then delete both directories.
	srcDir := "qs/files/move/src"
	dstDir := "qs/files/move/dst"
	for _, dirPath := range []string{srcDir, dstDir} {
		res, _, errs := adminFilesClient.Mkdir(dirPath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}

	files := map[string]string{
		"f1.md": "111",
		"f2.md": "22222",
	}
	for fileName, content := range files {
		assertUploadOK(t, filepath.Join(srcDir, fileName), content, addr, token)
		res, _, errs := adminFilesClient.Move(
			filepath.Join(srcDir, fileName),
			filepath.Join(dstDir, fileName),
		)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}
	if err = fs.Sync(); err != nil {
		t.Fatal(err)
	}

	// Every moved file must appear in dst with its original size.
	_, lResp, errs := adminFilesClient.List(dstDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	}
	for _, metadata := range lResp.Metadatas {
		content, ok := files[metadata.Name]
		if !ok {
			t.Fatalf("%s not found", metadata.Name)
		} else if int64(len(content)) != metadata.Size {
			t.Fatalf("size not match %d %d \n", len(content), metadata.Size)
		}
	}

	// Clean up both directories.
	for _, dirPath := range []string{srcDir, dstDir} {
		res, _, errs := adminFilesClient.Delete(dirPath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}
})
t.Run("test download APIs: Download(normal, ranges)", func(t *testing.T) {
	// Round-trip a couple of files: upload, sync to disk, then verify
	// the downloaded bytes match what was uploaded.
	downloads := map[string]string{
		"qs/files/download/path1/f1":    "123456",
		"qs/files/download/path1/path2": "12345678",
	}
	for filePath, content := range downloads {
		assertUploadOK(t, filePath, content, addr, token)
		if err = fs.Sync(); err != nil {
			t.Fatal(err)
		}
		assertDownloadOK(t, filePath, content, addr, token)
	}
})
t.Run("test sharing APIs: Upload-AddSharing-ListSharings-IsSharing-List-Download-DelSharing-ListSharings", func(t *testing.T) {
	files := map[string]string{
		"qs/files/sharing/path1/f1": "123456",
		"qs/files/sharing/path2/f2": "12345678",
	}
	for filePath, content := range files {
		assertUploadOK(t, filePath, content, addr, token)
		err = fs.Sync()
		if err != nil {
			t.Fatal(err)
		}
	}

	// Log in as the predefined non-admin user, who can only reach the
	// admin's files through sharings.
	userUsersCl := client.NewUsersClient(addr)
	resp, _, errs := userUsersCl.Login("demo", "Quicksh@re")
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	userUsersToken := client.GetCookie(resp.Cookies(), q.TokenCookie)
	userFilesCl := client.NewFilesClient(addr, userUsersToken)

	// Run the whole add/list/delete cycle twice to make sure sharing
	// state can be rebuilt after being fully removed.
	for i := 0; i < 2; i++ {
		// add sharings
		sharedPaths := map[string]bool{}
		for filePath := range files {
			dirPath := filepath.Dir(filePath)
			sharedPaths[dirPath] = true
			res, _, errs := adminFilesClient.AddSharing(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
			// sharing a plain file must be rejected with 400
			// FIX: errs was previously reassigned but never checked, so a
			// transport failure would nil-dereference res below.
			res, _, errs = adminFilesClient.AddSharing(filePath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 400 {
				t.Fatal(res.StatusCode)
			}
			// sharing a non-existing path must fail with 500
			res, _, errs = adminFilesClient.AddSharing(filepath.Join(filePath, "not_exist"))
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 500 {
				t.Fatal(res.StatusCode)
			}
		}
		// sharing the root is forbidden
		res, _, errs := adminFilesClient.AddSharing("/")
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 403 {
			t.Fatal(res.StatusCode)
		}

		// check listSharings
		res, shRes, errs := adminFilesClient.ListSharingIDs()
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		} else if len(shRes.IDs) != len(sharedPaths) {
			t.Fatal("shared size not match")
		}
		for dirPath, shareID := range shRes.IDs {
			if !sharedPaths[dirPath] {
				t.Fatalf("sharing %s not found", dirPath)
			}
			// check getShareDir: the user can resolve an ID back to its dir
			res, sharingDir, errs := userFilesCl.GetSharingDir(shareID)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			} else if sharingDir != dirPath {
				t.Fatalf("sharing path not equal: got(%s) (%s)", sharingDir, dirPath)
			}
		}
		// (removed a leftover debug fmt.Println of shRes.IDs)

		// check isSharing: both the clean path and a trailing-slash
		// variant must be recognized
		for dirPath := range sharedPaths {
			res, _, errs := userFilesCl.IsSharing(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
			res, _, errs = userFilesCl.IsSharing(fmt.Sprintf("%s/", dirPath))
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 { // dirPath is cleaned
				t.Fatal(res.StatusCode)
			}
		}
		// check listing: the user may list each shared dir and sees
		// exactly one entry per dir
		for dirPath := range shRes.IDs {
			res, lsResp, errs := userFilesCl.List(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
			if lsResp.Cwd != dirPath {
				t.Fatalf("list sharing folder: incorrect cwd (%s)", lsResp.Cwd)
			}
			if len(lsResp.Metadatas) != 1 {
				t.Fatalf("list sharing folder: incorrect metadata size (%d)", len(lsResp.Metadatas))
			}
		}
		// check downloading as the non-admin user
		for filePath, content := range files {
			assertDownloadOK(t, filePath, content, addr, userUsersToken)
		}
		// delete sharing
		for filePath := range files {
			dirPath := filepath.Dir(filePath)
			res, _, errs := adminFilesClient.DelSharing(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
		}
		// check listSharings: must be empty again
		res, shRes, errs = adminFilesClient.ListSharingIDs()
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
		if len(shRes.IDs) > 0 {
			t.Fatalf("sharings should be deleted len(%d)", len(shRes.IDs))
		}
		// check isSharing: deleted sharings must 404
		for dirPath := range sharedPaths {
			res, _, errs := userFilesCl.IsSharing(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 404 {
				t.Fatal(res.StatusCode)
			}
		}
		// check rejecting listing
		// NOTE(review): shRes.IDs was just verified empty, so this loop
		// never executes; it likely intended to iterate the previously
		// collected sharing dirs — confirm and fix intent separately.
		for dirPath := range shRes.IDs {
			res, _, errs := userFilesCl.List(dirPath)
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 403 {
				t.Fatal(res.StatusCode)
			}
		}
		// check rejecting downloading after the sharing is gone
		for filePath := range files {
			res, _, errs = userFilesCl.Download(filePath, map[string]string{})
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 403 {
				t.Fatal(res.StatusCode)
			}
		}
	}
})
t.Run("test concurrently uploading & downloading", func(t *testing.T) {
	type mockFile struct {
		FilePath string
		Content  string
	}

	wg := &sync.WaitGroup{}
	// startClient uploads then downloads each file five times, simulating
	// one concurrent client.
	startClient := func(files []*mockFile) {
		// FIX: wg.Done is deferred so the WaitGroup is always released,
		// even when the goroutine bails out early.
		defer wg.Done()
		for i := 0; i < 5; i++ {
			for _, file := range files {
				if !assertUploadOK(t, fmt.Sprintf("%s_%d", file.FilePath, i), file.Content, addr, token) {
					break
				}
				// FIX: use a goroutine-local err instead of assigning the
				// shared outer `err` from several goroutines (a data race),
				// and report via t.Error + return: t.Fatal must not be
				// called from any goroutine other than the test's own.
				if err := fs.Sync(); err != nil {
					t.Error(err)
					return
				}
				if !assertDownloadOK(t, fmt.Sprintf("%s_%d", file.FilePath, i), file.Content, addr, token) {
					break
				}
			}
		}
	}

	// Three clients, each with its own disjoint set of paths.
	for _, clientFiles := range [][]*mockFile{
		{
			{"concurrent/p0/f0", "00"},
			{"concurrent/f0.md", "0000 0000 0000 0"},
		},
		{
			{"concurrent/p1/f1", "11"},
			{"concurrent/f1.md", "1111 1111 1"},
		},
		{
			{"concurrent/p2/f2", "22"},
			{"concurrent/f2.md", "222"},
		},
	} {
		wg.Add(1)
		go startClient(clientFiles)
	}
	wg.Wait()
})
t.Run("test uploading APIs: ListUploadings, Create, ListUploadings, DelUploading", func(t *testing.T) {
	// Listing uploadings must succeed even before anything is created.
	res, lResp, errs := adminFilesClient.ListUploadings()
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}

	files := map[string]string{
		"qs/files/uploadings/path1/f1":    "123456",
		"qs/files/uploadings/path1/path2": "12345678",
	}
	// Create uploading entries without sending any chunk.
	for filePath, content := range files {
		res, _, errs := adminFilesClient.Create(filePath, int64(len([]byte(content))))
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}

	// Every pending upload must now be listed with zero progress and
	// the declared size.
	res, lResp, errs = adminFilesClient.ListUploadings()
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}
	gotInfos := map[string]*db.UploadInfo{}
	for _, info := range lResp.UploadInfos {
		gotInfos[info.RealFilePath] = info
	}
	for filePath, content := range files {
		info, ok := gotInfos[filePath]
		switch {
		case !ok:
			t.Fatalf("uploading(%s) not found", filePath)
		case info.Uploaded != 0:
			t.Fatalf("uploading(%s) uploaded is not correct", filePath)
		case info.Size != int64(len([]byte(content))):
			t.Fatalf("uploading(%s) size is not correct", filePath)
		}
	}

	// Deleting each pending upload must succeed...
	for filePath := range files {
		res, _, errs := adminFilesClient.DelUploading(filePath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}
	// ...and leave the uploading list empty.
	res, lResp, errs = adminFilesClient.ListUploadings()
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	} else if len(lResp.UploadInfos) != 0 {
		t.Fatalf("info is not deleted, info len(%+v)", lResp.UploadInfos)
	}
})
t.Run("test uploading APIs: Create, Stop, UploadChunk", func(t *testing.T) {
// cl := client.NewFilesClient(addr)
files := map[string]string{
"qs/files/uploadings1/path1/f1": "12345678",
}
for filePath, content := range files {
fileSize := int64(len([]byte(content)))
res, _, errs := adminFilesClient.Create(filePath, fileSize)
if len(errs) > 0 {
t.Fatal(errs)
} else if res.StatusCode != 200 {
t.Fatal(res.StatusCode)
}
chunks := [][]byte{
[]byte(content)[:fileSize/2],
[]byte(content)[fileSize/2:],
}
offset := int64(0)
for _, chunk := range chunks {
base64Content := base64.StdEncoding.EncodeToString(chunk)
res, bodyStr, errs := adminFilesClient.UploadChunk(filePath, base64Content, offset)
offset += int64(len(chunk))
if len(errs) > 0 {
t.Fatal(errs)
} else if res.StatusCode != 200 {
t.Fatal(fmt.Sprintln(res.StatusCode, bodyStr))
}
}
err = fs.Sync()
if err != nil {
t.Fatal(err)
}
// metadata
_, mRes, errs := adminFilesClient.Metadata(filePath)
if len(errs) > 0 {
t.Fatal(errs)
} else if mRes.Size != fileSize {
t.Fatal("incorrect uploaded size", mRes)
}
assertDownloadOK(t, filePath, content, addr, token)
}
})
// Create several uploading entries first, then upload all of their chunks
// in a second pass; verify metadata, on-disk content, and download.
t.Run("test uploading APIs: Create and UploadChunk randomly", func(t *testing.T) {
	// cl := client.NewFilesClient(addr)
	files := map[string]string{
		"qs/files/uploadings/random/path1/f1": "12345678",
		"qs/files/uploadings/random/path1/f2": "87654321",
		"qs/files/uploadings/random/path1/f3": "17654321",
	}
	// Pass 1: create all uploading entries without sending any data.
	for filePath, content := range files {
		fileSize := int64(len([]byte(content)))
		res, _, errs := adminFilesClient.Create(filePath, fileSize)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if res.StatusCode != 200 {
			t.Fatal(res.StatusCode)
		}
	}
	// Pass 2: upload each file as two half-sized chunks.
	for filePath, content := range files {
		fileSize := int64(len([]byte(content)))
		chunks := [][]byte{
			[]byte(content)[:fileSize/2],
			[]byte(content)[fileSize/2:],
		}
		offset := int64(0)
		for _, chunk := range chunks {
			base64Content := base64.StdEncoding.EncodeToString(chunk)
			res, _, errs := adminFilesClient.UploadChunk(filePath, base64Content, offset)
			offset += int64(len(chunk))
			if len(errs) > 0 {
				t.Fatal(errs)
			} else if res.StatusCode != 200 {
				t.Fatal(res.StatusCode)
			}
		}
		err = fs.Sync()
		if err != nil {
			t.Fatal(err)
		}
		// metadata size must match the declared file size
		_, mRes, errs := adminFilesClient.Metadata(filePath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if mRes.Size != fileSize {
			t.Fatal("incorrect uploaded size", mRes)
		}
		// compare the bytes written to disk with the original content
		isEqual, err := compareFileContent(fs, "0", filePath, content)
		if err != nil {
			t.Fatalf("err comparing content: %s", err)
		} else if !isEqual {
			t.Fatalf("file content not equal: %s", filePath)
		}
		assertDownloadOK(t, filePath, content, addr, token)
	}
})
t.Run("test folder moving: Mkdir-Create-UploadChunk-AddSharing-Move-IsSharing-List", func(t *testing.T) {
	// Moving a shared folder must keep it shared at its new path and
	// preserve its contents.
	srcDir := "qs/files/folder/move/src"
	dstDir := "qs/files/folder/move/dst"

	res, _, errs := adminFilesClient.Mkdir(srcDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}

	files := map[string]string{
		"f1.md": "111",
		"f2.md": "22222",
	}
	for fileName, content := range files {
		assertUploadOK(t, filepath.Join(srcDir, fileName), content, addr, token)
	}

	// Share src, then move the whole folder to dst.
	res, _, errs = adminFilesClient.AddSharing(srcDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}
	res, _, errs = adminFilesClient.Move(srcDir, dstDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}

	// The sharing must follow the folder to its new location.
	res, _, errs = adminFilesClient.IsSharing(dstDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if res.StatusCode != 200 {
		t.Fatal(res.StatusCode)
	}

	if err = fs.Sync(); err != nil {
		t.Fatal(err)
	}
	// All files must be listed under dst with their original sizes.
	_, lResp, errs := adminFilesClient.List(dstDir)
	if len(errs) > 0 {
		t.Fatal(errs)
	}
	for _, metadata := range lResp.Metadatas {
		content, ok := files[metadata.Name]
		if !ok {
			t.Fatalf("%s not found", metadata.Name)
		} else if int64(len(content)) != metadata.Size {
			t.Fatalf("size not match %d %d \n", len(content), metadata.Size)
		}
	}
})
// compareSearchResults reports whether the search response contains
// exactly the expected set of paths (order-insensitive).
compareSearchResults := func(expectedPaths map[string]bool, searchItemsResp []string) bool {
	if len(searchItemsResp) != len(expectedPaths) {
		return false
	}
	got := make(map[string]bool, len(searchItemsResp))
	for _, item := range searchItemsResp {
		got[item] = true
	}
	for expected := range expectedPaths {
		if !got[expected] {
			return false
		}
	}
	return true
}
// Verify that the search index tracks uploads, deletions, and moves, and
// that search results are scoped per user.
t.Run("SearchItems", func(t *testing.T) {
	path1 := "qs/files/search/keyword1keyword2/file"
	path1Dir := "qs/files/search/keyword1keyword2"
	path2 := "qs/files/search/path/keyword1keyword2"
	path22 := "qs/files/search/new_path/keyword1keyword2"
	path3 := "qs/files/search/normal_file"
	files := map[string]string{
		path1: "12345678",
		path2: "12345678",
		path3: "12345678",
	}
	// Expected hits right after uploading: for path1 the test expects
	// its parent dir (whose name contains the keywords), plus path2.
	expected := map[string]bool{
		path1Dir: true,
		path2:    true,
	}
	toDelete := map[string]bool{
		path1Dir: true,
	}
	afterDeleted := map[string]bool{
		path2: true,
	}
	toMove := map[string]string{
		path2: path22,
	}
	afterMoved := map[string]bool{
		path22: true,
	}
	for filePath, content := range files {
		assertUploadOK(t, filePath, content, addr, token)
		err = fs.Sync()
		if err != nil {
			t.Fatal(err)
		}
	}
	keywords := []string{"keyword1", "keyword2"}
	resp, searchItemsResp, errs := adminFilesClient.SearchItems(keywords)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	if !compareSearchResults(expected, searchItemsResp.Results) {
		fmt.Printf("expected(%+v) got(%+v)", expected, searchItemsResp.Results)
		t.Fatal("search result not match")
	}
	// Another user must not see the admin's items in search results.
	userFilesClient, err := loginFilesClient(addr, "demo", "Quicksh@re")
	if err != nil {
		t.Fatal(err)
	}
	resp, searchItemsResp, errs = userFilesClient.SearchItems(keywords)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	} else if len(searchItemsResp.Results) > 0 {
		t.Fatal("shoud return empty results")
	}
	// delete paths
	for pathname := range toDelete {
		resp, _, errs := adminFilesClient.Delete(pathname)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if resp.StatusCode != 200 {
			t.Fatal(resp.StatusCode)
		}
	}
	// deleted paths must disappear from the results
	resp, searchItemsResp, errs = adminFilesClient.SearchItems(keywords)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	if !compareSearchResults(afterDeleted, searchItemsResp.Results) {
		fmt.Printf("expected(%+v) got(%+v)", afterDeleted, searchItemsResp.Results)
		t.Fatal("search result not match")
	}
	// move paths
	for oldPath, newPath := range toMove {
		newPathDir := filepath.Dir(newPath)
		resp, _, errs := adminFilesClient.Mkdir(newPathDir)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if resp.StatusCode != 200 {
			t.Fatal(resp.StatusCode)
		}
		resp, _, errs = adminFilesClient.Move(oldPath, newPath)
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if resp.StatusCode != 200 {
			t.Fatal(resp.StatusCode)
		}
	}
	// moved paths must be indexed under their new location
	resp, searchItemsResp, errs = adminFilesClient.SearchItems(keywords)
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	if !compareSearchResults(afterMoved, searchItemsResp.Results) {
		fmt.Printf("expected(%+v) got(%+v)", afterMoved, searchItemsResp.Results)
		t.Fatal("search result not match")
	}
})
t.Run("Reindexing", func(t *testing.T) {
	// Create files directly through the fs layer (bypassing the upload
	// API) so they are missing from the search index, then trigger a
	// reindex and verify they become searchable.
	newFiles := map[string]bool{
		"qs/files/reindexing/reindexkey":          true,
		"qs/files/reindexing/pathname/reindexkey": true,
	}
	for newFilePath := range newFiles {
		newFileDir := filepath.Dir(newFilePath)
		err := fs.MkdirAll(newFileDir)
		if err != nil {
			t.Fatal(err)
		}
		err = fs.Create(newFilePath)
		if err != nil {
			t.Fatal(err)
		}
	}
	// FIX: the fs.Sync() error was previously discarded silently.
	if err := fs.Sync(); err != nil {
		t.Fatal(err)
	}

	resp, _, errs := adminFilesClient.Reindex()
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}

	// Poll until the async worker queue drains.
	settingsCl := client.NewSettingsClient(addr, token)
	for {
		reportResp, wqResp, errs := settingsCl.WorkerQueueLen()
		if len(errs) > 0 {
			t.Fatal(errs)
		} else if reportResp.StatusCode != 200 {
			t.Fatal(reportResp.StatusCode)
		}
		if wqResp.QueueLen == 0 {
			break
		}
		fmt.Printf("%d messages left\n", wqResp.QueueLen)
		time.Sleep(1 * time.Second)
	}
	// TODO: currently async worker status is not visible
	// still need to wait for worker finishing indexing...
	time.Sleep(3 * time.Second)

	resp, searchItemsResp, errs := adminFilesClient.SearchItems([]string{"reindexkey"})
	if len(errs) > 0 {
		t.Fatal(errs)
	} else if resp.StatusCode != 200 {
		t.Fatal(resp.StatusCode)
	}
	if !compareSearchResults(newFiles, searchItemsResp.Results) {
		fmt.Printf("expected(%+v) got(%+v)", newFiles, searchItemsResp.Results)
		t.Fatal("search result not match")
	}
})
// Finally, end the admin session started at the top of the test.
resp, _, errs = usersCl.Logout()
if len(errs) > 0 {
	t.Fatal(errs)
} else if resp.StatusCode != 200 {
	t.Fatal(resp.StatusCode)
}
}