feat(qs2) add qs2 framework
This commit is contained in:
parent
6ae65fe09b
commit
83100007e3
33 changed files with 2934 additions and 60 deletions
119
src/client/http.go
Normal file
119
src/client/http.go
Normal file
|
@ -0,0 +1,119 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/handlers/fileshdr"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
)
|
||||
|
||||
type QSClient struct {
|
||||
addr string
|
||||
r *gorequest.SuperAgent
|
||||
}
|
||||
|
||||
func NewQSClient(addr string) *QSClient {
|
||||
gr := gorequest.New()
|
||||
return &QSClient{
|
||||
addr: addr,
|
||||
r: gr,
|
||||
}
|
||||
}
|
||||
|
||||
func (cl *QSClient) url(urlpath string) string {
|
||||
return fmt.Sprintf("%s%s", cl.addr, urlpath)
|
||||
}
|
||||
|
||||
func (cl *QSClient) Create(filepath string, size int64) (gorequest.Response, string, []error) {
|
||||
return cl.r.Post(cl.url("/v1/fs/files")).
|
||||
Send(fileshdr.CreateReq{
|
||||
Path: filepath,
|
||||
FileSize: size,
|
||||
}).
|
||||
End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) Delete(filepath string) (gorequest.Response, string, []error) {
|
||||
return cl.r.Delete(cl.url("/v1/fs/files")).
|
||||
Param(fileshdr.FilePathQuery, filepath).
|
||||
End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) Metadata(filepath string) (gorequest.Response, *fileshdr.MetadataResp, []error) {
|
||||
resp, body, errs := cl.r.Get(cl.url("/v1/fs/metadata")).
|
||||
Param(fileshdr.FilePathQuery, filepath).
|
||||
End()
|
||||
|
||||
mResp := &fileshdr.MetadataResp{}
|
||||
err := json.Unmarshal([]byte(body), mResp)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
return nil, nil, errs
|
||||
}
|
||||
return resp, mResp, nil
|
||||
}
|
||||
|
||||
func (cl *QSClient) Mkdir(dirpath string) (gorequest.Response, string, []error) {
|
||||
return cl.r.Post(cl.url("/v1/fs/dirs")).
|
||||
Send(fileshdr.MkdirReq{Path: dirpath}).
|
||||
End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) Move(oldpath, newpath string) (gorequest.Response, string, []error) {
|
||||
return cl.r.Patch(cl.url("/v1/fs/files/move")).
|
||||
Send(fileshdr.MoveReq{
|
||||
OldPath: oldpath,
|
||||
NewPath: newpath,
|
||||
}).
|
||||
End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) UploadChunk(filepath string, content string, offset int64) (gorequest.Response, string, []error) {
|
||||
return cl.r.Patch(cl.url("/v1/fs/files/chunks")).
|
||||
Send(fileshdr.UploadChunkReq{
|
||||
Path: filepath,
|
||||
Content: content,
|
||||
Offset: offset,
|
||||
}).
|
||||
End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) UploadStatus(filepath string) (gorequest.Response, *fileshdr.UploadStatusResp, []error) {
|
||||
resp, body, errs := cl.r.Get(cl.url("/v1/fs/files/chunks")).
|
||||
Param(fileshdr.FilePathQuery, filepath).
|
||||
End()
|
||||
|
||||
uResp := &fileshdr.UploadStatusResp{}
|
||||
err := json.Unmarshal([]byte(body), uResp)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
return nil, nil, errs
|
||||
}
|
||||
return resp, uResp, nil
|
||||
}
|
||||
|
||||
func (cl *QSClient) Download(filepath string, headers map[string]string) (gorequest.Response, string, []error) {
|
||||
r := cl.r.Get(cl.url("/v1/fs/files/chunks")).
|
||||
Param(fileshdr.FilePathQuery, filepath)
|
||||
for key, val := range headers {
|
||||
r = r.Set(key, val)
|
||||
}
|
||||
return r.End()
|
||||
}
|
||||
|
||||
func (cl *QSClient) List(dirPath string) (gorequest.Response, *fileshdr.ListResp, []error) {
|
||||
resp, body, errs := cl.r.Get(cl.url("/v1/fs/dirs")).
|
||||
Param(fileshdr.ListDirQuery, dirPath).
|
||||
End()
|
||||
if len(errs) > 0 {
|
||||
return nil, nil, errs
|
||||
}
|
||||
|
||||
lResp := &fileshdr.ListResp{}
|
||||
err := json.Unmarshal([]byte(body), lResp)
|
||||
if err != nil {
|
||||
return nil, nil, append(errs, err)
|
||||
}
|
||||
return resp, lResp, nil
|
||||
}
|
6
src/cryptoutil/cryptoutil_interface.go
Normal file
6
src/cryptoutil/cryptoutil_interface.go
Normal file
|
@ -0,0 +1,6 @@
|
|||
package cryptoutil
|
||||
|
||||
type ITokenEncDec interface {
|
||||
FromToken(token string, kvs map[string]string) (map[string]string, error)
|
||||
ToToken(kvs map[string]string) (string, error)
|
||||
}
|
51
src/cryptoutil/jwt/jwt.go
Normal file
51
src/cryptoutil/jwt/jwt.go
Normal file
|
@ -0,0 +1,51 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
jwtpkg "github.com/robbert229/jwt"
|
||||
)
|
||||
|
||||
type JWTEncDec struct {
|
||||
alg jwtpkg.Algorithm
|
||||
}
|
||||
|
||||
func NewJWTEncDec(secret string) *JWTEncDec {
|
||||
return &JWTEncDec{
|
||||
alg: jwtpkg.HmacSha256(secret),
|
||||
}
|
||||
}
|
||||
|
||||
func (ed *JWTEncDec) FromToken(token string, kvs map[string]string) (map[string]string, error) {
|
||||
claims, err := ed.alg.Decode(token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for key := range kvs {
|
||||
iVal, err := claims.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
strVal, ok := iVal.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("incorrect JWT claim")
|
||||
}
|
||||
|
||||
kvs[key] = strVal
|
||||
}
|
||||
return kvs, nil
|
||||
}
|
||||
|
||||
func (ed *JWTEncDec) ToToken(kvs map[string]string) (string, error) {
|
||||
claims := jwtpkg.NewClaim()
|
||||
for key, val := range kvs {
|
||||
claims.Set(key, val)
|
||||
}
|
||||
|
||||
token, err := ed.alg.Encode(claims)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return token, nil
|
||||
}
|
79
src/depidx/deps.go
Normal file
79
src/depidx/deps.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package depidx
|
||||
|
||||
import (
|
||||
"github.com/ihexxa/gocfg"
|
||||
"github.com/ihexxa/quickshare/src/cryptoutil"
|
||||
"github.com/ihexxa/quickshare/src/fs"
|
||||
"github.com/ihexxa/quickshare/src/idgen"
|
||||
"github.com/ihexxa/quickshare/src/kvstore"
|
||||
"github.com/ihexxa/quickshare/src/logging"
|
||||
)
|
||||
|
||||
type IUploader interface {
|
||||
Create(filePath string, size int64) error
|
||||
WriteChunk(filePath string, chunk []byte, off int64) (int, error)
|
||||
Status(filePath string) (int64, bool, error)
|
||||
Close() error
|
||||
Sync() error
|
||||
}
|
||||
|
||||
type Deps struct {
|
||||
fs fs.ISimpleFS
|
||||
token cryptoutil.ITokenEncDec
|
||||
log logging.ILogger
|
||||
kv kvstore.IKVStore
|
||||
uploader IUploader
|
||||
id idgen.IIDGen
|
||||
}
|
||||
|
||||
func NewDeps(cfg gocfg.ICfg) *Deps {
|
||||
return &Deps{}
|
||||
}
|
||||
|
||||
func (deps *Deps) FS() fs.ISimpleFS {
|
||||
return deps.fs
|
||||
}
|
||||
|
||||
func (deps *Deps) SetFS(filesystem fs.ISimpleFS) {
|
||||
deps.fs = filesystem
|
||||
}
|
||||
|
||||
func (deps *Deps) Token() cryptoutil.ITokenEncDec {
|
||||
return deps.token
|
||||
}
|
||||
|
||||
func (deps *Deps) SetToken(tokenMaker cryptoutil.ITokenEncDec) {
|
||||
deps.token = tokenMaker
|
||||
}
|
||||
|
||||
func (deps *Deps) Log() logging.ILogger {
|
||||
return deps.log
|
||||
}
|
||||
|
||||
func (deps *Deps) SetLog(logger logging.ILogger) {
|
||||
deps.log = logger
|
||||
}
|
||||
|
||||
func (deps *Deps) KV() kvstore.IKVStore {
|
||||
return deps.kv
|
||||
}
|
||||
|
||||
func (deps *Deps) SetKV(kvstore kvstore.IKVStore) {
|
||||
deps.kv = kvstore
|
||||
}
|
||||
|
||||
func (deps *Deps) Uploader() IUploader {
|
||||
return deps.uploader
|
||||
}
|
||||
|
||||
func (deps *Deps) SetUploader(uploader IUploader) {
|
||||
deps.uploader = uploader
|
||||
}
|
||||
|
||||
func (deps *Deps) ID() idgen.IIDGen {
|
||||
return deps.id
|
||||
}
|
||||
|
||||
func (deps *Deps) SetID(ider idgen.IIDGen) {
|
||||
deps.id = ider
|
||||
}
|
7
src/downloadmgr/mgr.go
Normal file
7
src/downloadmgr/mgr.go
Normal file
|
@ -0,0 +1,7 @@
|
|||
package downloadmgr
|
||||
|
||||
type DownloadMgr struct{}
|
||||
|
||||
func NewDownloadMgr() *DownloadMgr {
|
||||
return &DownloadMgr{}
|
||||
}
|
28
src/fs/fs_interface.go
Normal file
28
src/fs/fs_interface.go
Normal file
|
@ -0,0 +1,28 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type ReadCloseSeeker interface {
|
||||
io.Reader
|
||||
io.ReaderFrom
|
||||
io.Closer
|
||||
io.Seeker
|
||||
}
|
||||
|
||||
type ISimpleFS interface {
|
||||
Create(path string) error
|
||||
MkdirAll(path string) error
|
||||
Remove(path string) error
|
||||
Rename(oldpath, newpath string) error
|
||||
ReadAt(path string, b []byte, off int64) (n int, err error)
|
||||
WriteAt(path string, b []byte, off int64) (n int, err error)
|
||||
Stat(path string) (os.FileInfo, error)
|
||||
Close() error
|
||||
Sync() error
|
||||
GetFileReader(path string) (ReadCloseSeeker, error)
|
||||
Root() string
|
||||
ListDir(path string) ([]os.FileInfo, error)
|
||||
}
|
314
src/fs/local/fs.go
Normal file
314
src/fs/local/fs.go
Normal file
|
@ -0,0 +1,314 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/fs"
|
||||
)
|
||||
|
||||
var ErrTooManyOpens = errors.New("too many opened files")
|
||||
|
||||
type LocalFS struct {
|
||||
root string
|
||||
defaultPerm os.FileMode
|
||||
defaultDirPerm os.FileMode
|
||||
opens map[string]*fileInfo
|
||||
opensLimit int
|
||||
opensMtx *sync.RWMutex
|
||||
opensCleanSize int
|
||||
openTTL time.Duration
|
||||
readers map[string]*fileInfo
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
lastAccess time.Time
|
||||
fd *os.File
|
||||
}
|
||||
|
||||
func NewLocalFS(root string, defaultPerm uint32, opensLimit, openTTL int) *LocalFS {
|
||||
if root == "" {
|
||||
root = "."
|
||||
}
|
||||
return &LocalFS{
|
||||
root: root,
|
||||
defaultPerm: os.FileMode(defaultPerm),
|
||||
defaultDirPerm: os.FileMode(0775),
|
||||
opens: map[string]*fileInfo{},
|
||||
opensLimit: opensLimit,
|
||||
openTTL: time.Duration(openTTL) * time.Second,
|
||||
opensMtx: &sync.RWMutex{},
|
||||
opensCleanSize: 10,
|
||||
readers: map[string]*fileInfo{}, // TODO: track readers and close idles
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Root() string {
|
||||
return fs.root
|
||||
}
|
||||
|
||||
// closeOpens assumes that it is called after opensMtx.Lock()
|
||||
func (fs *LocalFS) closeOpens(closeAll bool) error {
|
||||
batch := fs.opensCleanSize
|
||||
|
||||
var err error
|
||||
for key, info := range fs.opens {
|
||||
if batch <= 0 && !closeAll {
|
||||
break
|
||||
}
|
||||
batch--
|
||||
|
||||
if info.lastAccess.Add(fs.openTTL).Before(time.Now()) {
|
||||
delete(fs.opens, key)
|
||||
if err = info.fd.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := info.fd.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Sync() error {
|
||||
fs.opensMtx.Lock()
|
||||
defer fs.opensMtx.Unlock()
|
||||
return fs.closeOpens(true)
|
||||
}
|
||||
|
||||
// check refers implementation of Dir.Open() in http package
|
||||
func (fs *LocalFS) translate(name string) (string, error) {
|
||||
if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) {
|
||||
return "", errors.New("invalid character in file path")
|
||||
}
|
||||
return filepath.Join(fs.root, filepath.FromSlash(path.Clean("/"+name))), nil
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Create(path string) error {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(fullpath, os.O_CREATE|os.O_RDWR|os.O_EXCL, fs.defaultPerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs.opensMtx.Lock()
|
||||
defer fs.opensMtx.Unlock()
|
||||
if len(fs.opens) > fs.opensLimit {
|
||||
return ErrTooManyOpens
|
||||
}
|
||||
|
||||
fs.opens[fullpath] = &fileInfo{
|
||||
lastAccess: time.Now(),
|
||||
fd: fd,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *LocalFS) MkdirAll(path string) error {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.MkdirAll(fullpath, fs.defaultDirPerm)
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Remove(entryPath string) error {
|
||||
fullpath, err := fs.translate(entryPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Remove(fullpath)
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Rename(oldpath, newpath string) error {
|
||||
fullOldPath, err := fs.translate(oldpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = os.Stat(fullOldPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullNewPath, err := fs.translate(newpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// avoid replacing existing file/folder
|
||||
_, err = os.Stat(fullNewPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return os.Rename(fullOldPath, fullNewPath)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return os.ErrExist
|
||||
}
|
||||
|
||||
func (fs *LocalFS) ReadAt(path string, b []byte, off int64) (int, error) {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
info, err := func() (*fileInfo, error) {
|
||||
fs.opensMtx.Lock()
|
||||
defer fs.opensMtx.Unlock()
|
||||
|
||||
info, ok := fs.opens[fullpath]
|
||||
if !ok {
|
||||
if len(fs.opens) > fs.opensLimit {
|
||||
return nil, ErrTooManyOpens
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(fullpath, os.O_RDWR|os.O_APPEND, fs.defaultPerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = &fileInfo{
|
||||
fd: fd,
|
||||
lastAccess: time.Now(),
|
||||
}
|
||||
fs.opens[fullpath] = info
|
||||
fs.closeOpens(false)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
newOffset, err := info.fd.Seek(off, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
} else if newOffset != off {
|
||||
// TODO: will this happen?
|
||||
return 0, fmt.Errorf("seek offset (%d) != required(%d)", newOffset, off)
|
||||
}
|
||||
|
||||
return info.fd.ReadAt(b, off)
|
||||
}
|
||||
|
||||
func (fs *LocalFS) WriteAt(path string, b []byte, off int64) (int, error) {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
info, err := func() (*fileInfo, error) {
|
||||
fs.opensMtx.Lock()
|
||||
defer fs.opensMtx.Unlock()
|
||||
|
||||
info, ok := fs.opens[fullpath]
|
||||
if !ok {
|
||||
if len(fs.opens) > fs.opensLimit {
|
||||
return nil, ErrTooManyOpens
|
||||
}
|
||||
|
||||
// it does NOT create file for writing
|
||||
fd, err := os.OpenFile(fullpath, os.O_RDWR|os.O_APPEND, fs.defaultPerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info = &fileInfo{
|
||||
fd: fd,
|
||||
lastAccess: time.Now(),
|
||||
}
|
||||
fs.opens[fullpath] = info
|
||||
fs.closeOpens(false)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
newOffset, err := info.fd.Seek(off, os.SEEK_SET)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
} else if newOffset != off {
|
||||
// TODO: will this happen?
|
||||
return 0, fmt.Errorf("seek offset (%d) != required(%d)", newOffset, off)
|
||||
}
|
||||
|
||||
return info.fd.WriteAt(b, off)
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Stat(path string) (os.FileInfo, error) {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.opensMtx.RLock()
|
||||
info, ok := fs.opens[fullpath]
|
||||
fs.opensMtx.RUnlock()
|
||||
if ok {
|
||||
return info.fd.Stat()
|
||||
}
|
||||
return os.Stat(fullpath)
|
||||
}
|
||||
|
||||
func (fs *LocalFS) Close() error {
|
||||
fs.opensMtx.Lock()
|
||||
defer fs.opensMtx.Unlock()
|
||||
|
||||
var err error
|
||||
for filePath, info := range fs.opens {
|
||||
err = info.fd.Sync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = info.fd.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(fs.opens, filePath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readers are not tracked by opens
|
||||
func (fs *LocalFS) GetFileReader(path string) (fs.ReadCloseSeeker, error) {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fd, err := os.OpenFile(fullpath, os.O_RDONLY, fs.defaultPerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.readers[fullpath] = &fileInfo{
|
||||
fd: fd,
|
||||
lastAccess: time.Now(),
|
||||
}
|
||||
return fd, nil
|
||||
}
|
||||
|
||||
func (fs *LocalFS) ListDir(path string) ([]os.FileInfo, error) {
|
||||
fullpath, err := fs.translate(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ioutil.ReadDir(fullpath)
|
||||
}
|
187
src/fs/mem/fs.go
Normal file
187
src/fs/mem/fs.go
Normal file
|
@ -0,0 +1,187 @@
|
|||
package mem
|
||||
|
||||
// type MemFS struct {
|
||||
// files map[string][]byte
|
||||
// dirs map[string][]string
|
||||
// }
|
||||
|
||||
// type MemFileInfo struct {
|
||||
// name string
|
||||
// size int64
|
||||
// isDir bool
|
||||
// }
|
||||
|
||||
// func (fi *MemFileInfo) Name() string {
|
||||
// return fi.name
|
||||
// }
|
||||
// func (fi *MemFileInfo) Size() int64 {
|
||||
// return fi.size
|
||||
// }
|
||||
// func (fi *MemFileInfo) Mode() os.FileMode {
|
||||
// return 0666
|
||||
// }
|
||||
// func (fi *MemFileInfo) ModTime() time.Time {
|
||||
// return time.Now()
|
||||
// }
|
||||
// func (fi *MemFileInfo) IsDir() bool {
|
||||
// return fi.isDir
|
||||
// }
|
||||
// func (fi *MemFileInfo) Sys() interface{} {
|
||||
// return ""
|
||||
// }
|
||||
|
||||
// func NewMemFS() *MemFS {
|
||||
// return &MemFS{
|
||||
// files: map[string][]byte{},
|
||||
// dirs: map[string][]string{},
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Create(filePath string) error
|
||||
// // MkdirAll(filePath string) error
|
||||
// // Remove(filePath string) error
|
||||
|
||||
// func (fs *MemFS) Create(filePath string) error {
|
||||
// dirPath := path.Dir(filePath)
|
||||
// files, ok := fs.dirs[dirPath]
|
||||
// if !ok {
|
||||
// fs.dirs[dirPath] = []string{}
|
||||
// }
|
||||
|
||||
// fs.dirs[dirPath] = append(fs.dirs[dirPath], filePath)
|
||||
// fs.files[filePath] = []byte("")
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) MkdirAll(dirPath string) error {
|
||||
// _, ok := fs.dirs[dirPath]
|
||||
// if ok {
|
||||
// return os.ErrExist
|
||||
// }
|
||||
// fs.dirs[dirPath] = []string{}
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Remove(filePath string) error {
|
||||
// files, ok := fs.dirs[filePath]
|
||||
// if ok {
|
||||
// for _, fileName := range files {
|
||||
// d
|
||||
// }
|
||||
// }
|
||||
|
||||
// delete(fs.dirs, filePath)
|
||||
// delete(fs.files, filePath)
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Rename(oldpath, newpath string) error {
|
||||
// content, ok := fs.files[oldpath]
|
||||
// if !ok {
|
||||
// return os.ErrNotExist
|
||||
// }
|
||||
// delete(fs.files, oldpath)
|
||||
|
||||
// newDir := path.Dir(newpath)
|
||||
// _, ok = fs.dirs[newDir]
|
||||
// if !ok {
|
||||
// fs.dirs[newDir] = []string{}
|
||||
// }
|
||||
// fs.dirs[newDir] = append(fs.dirs[newDir], newpath)
|
||||
// fs.files[newpath] = content
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) ReadAt(filePath string, b []byte, off int64) (n int, err error) {
|
||||
// content, ok := fs.files[filePath]
|
||||
// if !ok {
|
||||
// return 0, os.ErrNotExist
|
||||
// }
|
||||
|
||||
// if off >= int64(len(content)) {
|
||||
// return 0, errors.New("offset > fileSize")
|
||||
// }
|
||||
// right := off + int64(len(b))
|
||||
// if right > int64(len(content)) {
|
||||
// right = int64(len(content))
|
||||
// }
|
||||
// return copy(b, content[off:right]), nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) WriteAt(filePath string, b []byte, off int64) (n int, err error) {
|
||||
// content, ok := fs.files[filePath]
|
||||
// if !ok {
|
||||
// return 0, os.ErrNotExist
|
||||
// }
|
||||
|
||||
// if off >= int64(len(content)) {
|
||||
// return 0, errors.New("offset > fileSize")
|
||||
// } else if off+int64(len(b)) > int64(len(content)) {
|
||||
// fs.files[filePath] = append(
|
||||
// fs.files[filePath],
|
||||
// make([]byte, off+int64(len(b))-int64(len(content)))...,
|
||||
// )
|
||||
// }
|
||||
|
||||
// copy(fs.files[filePath][off:], b)
|
||||
// return len(b), nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Stat(filePath string) (os.FileInfo, error) {
|
||||
// _, ok := fs.dirs[filePath]
|
||||
// if ok {
|
||||
// return &MemFileInfo{
|
||||
// name: filePath,
|
||||
// size: 0,
|
||||
// isDir: true,
|
||||
// }, nil
|
||||
// }
|
||||
|
||||
// content, ok := fs.files[filePath]
|
||||
// if ok {
|
||||
// return &MemFileInfo{
|
||||
// name: filePath,
|
||||
// size: int64(len(content)),
|
||||
// isDir: false,
|
||||
// }, nil
|
||||
// }
|
||||
// return nil, os.ErrNotExist
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Close() error {
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Sync() error {
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) GetFileReader(filePath string) (ReadCloseSeeker, error) {
|
||||
// content, ok := fs.files[filePath]
|
||||
// if !ok {
|
||||
// return nil, os.ErrNotExist
|
||||
// }
|
||||
// return bytes.NewReader(content)
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) Root() string {
|
||||
// return ""
|
||||
// }
|
||||
|
||||
// func (fs *MemFS) ListDir(filePath string) ([]os.FileInfo, error) {
|
||||
// files, ok := fs.dirs[filePath]
|
||||
// if !ok {
|
||||
// return nil, os.ErrNotExist
|
||||
// }
|
||||
|
||||
// infos := []*MemFileInfo{}
|
||||
// for _, fileName := range files {
|
||||
// infos = append(infos, &MemFileInfo{
|
||||
// name: fileName,
|
||||
// size: int64(len(fs.files[fileName])),
|
||||
// isDir: false,
|
||||
// })
|
||||
// }
|
||||
|
||||
// return infos
|
||||
// }
|
|
@ -1,12 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func Upload(ctx *gin.Context) {}
|
||||
func List(ctx *gin.Context) {}
|
||||
func Delete(ctx *gin.Context) {}
|
||||
func Metadata(ctx *gin.Context) {}
|
||||
func Copy(ctx *gin.Context) {}
|
||||
func Move(ctx *gin.Context) {}
|
414
src/handlers/fileshdr/handlers.go
Normal file
414
src/handlers/fileshdr/handlers.go
Normal file
|
@ -0,0 +1,414 @@
|
|||
package fileshdr
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/ihexxa/gocfg"
|
||||
"github.com/ihexxa/multipart"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/depidx"
|
||||
q "github.com/ihexxa/quickshare/src/handlers"
|
||||
)
|
||||
|
||||
var (
|
||||
// dirs
|
||||
UploadDir = "uploadings"
|
||||
FsDir = "files"
|
||||
|
||||
// queries
|
||||
FilePathQuery = "fp"
|
||||
ListDirQuery = "dp"
|
||||
|
||||
// headers
|
||||
rangeHeader = "Range"
|
||||
acceptRangeHeader = "Accept-Range"
|
||||
ifRangeHeader = "If-Range"
|
||||
)
|
||||
|
||||
type FileHandlers struct {
|
||||
cfg gocfg.ICfg
|
||||
deps *depidx.Deps
|
||||
uploadMgr *UploadMgr
|
||||
}
|
||||
|
||||
func NewFileHandlers(cfg gocfg.ICfg, deps *depidx.Deps) (*FileHandlers, error) {
|
||||
var err error
|
||||
if err = deps.FS().MkdirAll(UploadDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = deps.FS().MkdirAll(FsDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FileHandlers{
|
||||
cfg: cfg,
|
||||
deps: deps,
|
||||
uploadMgr: NewUploadMgr(deps.KV()),
|
||||
}, err
|
||||
}
|
||||
|
||||
type AutoLocker struct {
|
||||
h *FileHandlers
|
||||
c *gin.Context
|
||||
key string
|
||||
}
|
||||
|
||||
func (h *FileHandlers) NewAutoLocker(c *gin.Context, key string) *AutoLocker {
|
||||
return &AutoLocker{
|
||||
h: h,
|
||||
c: c,
|
||||
key: key,
|
||||
}
|
||||
}
|
||||
|
||||
func (lk *AutoLocker) Exec(handler func()) {
|
||||
var err error
|
||||
kv := lk.h.deps.KV()
|
||||
if err = kv.TryLock(lk.key); err != nil {
|
||||
lk.c.JSON(q.Resp(500))
|
||||
return
|
||||
}
|
||||
|
||||
handler()
|
||||
|
||||
if err = kv.Unlock(lk.key); err != nil {
|
||||
// TODO: use logger
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
type CreateReq struct {
|
||||
Path string `json:"path"`
|
||||
FileSize int64 `json:"fileSize"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Create(c *gin.Context) {
|
||||
req := &CreateReq{}
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
tmpFilePath := h.GetTmpPath(req.Path)
|
||||
locker := h.NewAutoLocker(c, tmpFilePath)
|
||||
locker.Exec(func() {
|
||||
err := h.deps.FS().Create(tmpFilePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
err = h.uploadMgr.AddInfo(req.Path, tmpFilePath, req.FileSize, false)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
fileDir := h.FsPath(filepath.Dir(req.Path))
|
||||
err = h.deps.FS().MkdirAll(fileDir)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
})
|
||||
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Delete(c *gin.Context) {
|
||||
filePath := c.Query(FilePathQuery)
|
||||
if filePath == "" {
|
||||
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
|
||||
return
|
||||
}
|
||||
|
||||
filePath = h.FsPath(filePath)
|
||||
err := h.deps.FS().Remove(filePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
||||
|
||||
type MetadataResp struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
ModTime time.Time `json:"modTime"`
|
||||
IsDir bool `json:"isDir"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Metadata(c *gin.Context) {
|
||||
filePath := c.Query(FilePathQuery)
|
||||
if filePath == "" {
|
||||
c.JSON(q.ErrResp(c, 400, errors.New("invalid file path")))
|
||||
return
|
||||
}
|
||||
|
||||
filePath = h.FsPath(filePath)
|
||||
info, err := h.deps.FS().Stat(filePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(200, MetadataResp{
|
||||
Name: info.Name(),
|
||||
Size: info.Size(),
|
||||
ModTime: info.ModTime(),
|
||||
IsDir: info.IsDir(),
|
||||
})
|
||||
}
|
||||
|
||||
type MkdirReq struct {
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Mkdir(c *gin.Context) {
|
||||
req := &MkdirReq{}
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
return
|
||||
}
|
||||
|
||||
dirPath := h.FsPath(req.Path)
|
||||
err := h.deps.FS().MkdirAll(dirPath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
||||
|
||||
type MoveReq struct {
|
||||
OldPath string `json:"oldPath"`
|
||||
NewPath string `json:"newPath"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Move(c *gin.Context) {
|
||||
req := &MoveReq{}
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
return
|
||||
}
|
||||
|
||||
oldPath := h.FsPath(req.OldPath)
|
||||
newPath := h.FsPath(req.NewPath)
|
||||
_, err := h.deps.FS().Stat(oldPath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
_, err = h.deps.FS().Stat(newPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
} else if err == nil {
|
||||
// err is nil because file exists
|
||||
c.JSON(q.ErrResp(c, 400, os.ErrExist))
|
||||
return
|
||||
}
|
||||
|
||||
err = h.deps.FS().Rename(oldPath, newPath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
||||
|
||||
type UploadChunkReq struct {
|
||||
Path string `json:"path"`
|
||||
Content string `json:"content"`
|
||||
Offset int64 `json:"offset"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) UploadChunk(c *gin.Context) {
|
||||
req := &UploadChunkReq{}
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
tmpFilePath := h.GetTmpPath(req.Path)
|
||||
locker := h.NewAutoLocker(c, tmpFilePath)
|
||||
locker.Exec(func() {
|
||||
var err error
|
||||
|
||||
_, fileSize, uploaded, err := h.uploadMgr.GetInfo(tmpFilePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
} else if uploaded != req.Offset {
|
||||
c.JSON(q.ErrResp(c, 500, errors.New("offset != uploaded")))
|
||||
return
|
||||
}
|
||||
|
||||
wrote, err := h.deps.FS().WriteAt(tmpFilePath, []byte(req.Content), req.Offset)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
err = h.uploadMgr.IncreUploaded(tmpFilePath, int64(wrote))
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
// move the file from uploading dir to uploaded dir
|
||||
if uploaded+int64(wrote) == fileSize {
|
||||
fsFilePath := h.FsPath(req.Path)
|
||||
err = h.deps.FS().Rename(tmpFilePath, fsFilePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
err = h.uploadMgr.DelInfo(tmpFilePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.JSON(200, &UploadStatusResp{
|
||||
Path: req.Path,
|
||||
IsDir: false,
|
||||
FileSize: fileSize,
|
||||
Uploaded: uploaded + int64(wrote),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
type UploadStatusResp struct {
|
||||
Path string `json:"path"`
|
||||
IsDir bool `json:"isDir"`
|
||||
FileSize int64 `json:"fileSize"`
|
||||
Uploaded int64 `json:"uploaded"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) UploadStatus(c *gin.Context) {
|
||||
filePath := c.Query(FilePathQuery)
|
||||
if filePath == "" {
|
||||
c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
|
||||
}
|
||||
|
||||
tmpFilePath := h.GetTmpPath(filePath)
|
||||
locker := h.NewAutoLocker(c, tmpFilePath)
|
||||
locker.Exec(func() {
|
||||
_, fileSize, uploaded, err := h.uploadMgr.GetInfo(tmpFilePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(200, &UploadStatusResp{
|
||||
Path: filePath,
|
||||
IsDir: false,
|
||||
FileSize: fileSize,
|
||||
Uploaded: uploaded,
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// TODO: support ETag
|
||||
// TODO: use correct content type
|
||||
func (h *FileHandlers) Download(c *gin.Context) {
|
||||
rangeVal := c.GetHeader(rangeHeader)
|
||||
ifRangeVal := c.GetHeader(ifRangeHeader)
|
||||
filePath := c.Query(FilePathQuery)
|
||||
if filePath == "" {
|
||||
c.JSON(q.ErrResp(c, 400, errors.New("invalid file name")))
|
||||
}
|
||||
|
||||
// concurrency relies on os's mechanism
|
||||
filePath = h.FsPath(filePath)
|
||||
info, err := h.deps.FS().Stat(filePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
return
|
||||
} else if info.IsDir() {
|
||||
c.JSON(q.ErrResp(c, 501, errors.New("downloading a folder is not supported")))
|
||||
}
|
||||
|
||||
r, err := h.deps.FS().GetFileReader(filePath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
}
|
||||
|
||||
// respond to normal requests
|
||||
if ifRangeVal != "" || rangeVal == "" {
|
||||
c.DataFromReader(200, info.Size(), "application/octet-stream", r, map[string]string{})
|
||||
return
|
||||
}
|
||||
|
||||
// respond to range requests
|
||||
parts, err := multipart.RangeToParts(rangeVal, "application/octet-stream", fmt.Sprintf("%d", info.Size()))
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
err = multipart.WriteResponse(r, pw, filePath, parts)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
}
|
||||
extraHeaders := map[string]string{
|
||||
"Content-Disposition": fmt.Sprintf(`attachment; filename="%s"`, filePath),
|
||||
}
|
||||
c.DataFromReader(206, info.Size(), "application/octet-stream", pr, extraHeaders)
|
||||
}
|
||||
|
||||
type ListResp struct {
|
||||
Metadatas []*MetadataResp `json:"metadatas"`
|
||||
}
|
||||
|
||||
func (h *FileHandlers) List(c *gin.Context) {
|
||||
dirPath := c.Query(ListDirQuery)
|
||||
if dirPath == "" {
|
||||
c.JSON(q.ErrResp(c, 400, errors.New("incorrect path name")))
|
||||
return
|
||||
}
|
||||
|
||||
dirPath = h.FsPath(dirPath)
|
||||
infos, err := h.deps.FS().ListDir(dirPath)
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
metadatas := []*MetadataResp{}
|
||||
for _, info := range infos {
|
||||
metadatas = append(metadatas, &MetadataResp{
|
||||
Name: info.Name(),
|
||||
Size: info.Size(),
|
||||
ModTime: info.ModTime(),
|
||||
IsDir: info.IsDir(),
|
||||
})
|
||||
}
|
||||
|
||||
c.JSON(200, &ListResp{Metadatas: metadatas})
|
||||
}
|
||||
|
||||
func (h *FileHandlers) Copy(c *gin.Context) {
|
||||
c.JSON(q.NewMsgResp(501, "Not Implemented"))
|
||||
}
|
||||
|
||||
func (h *FileHandlers) CopyDir(c *gin.Context) {
|
||||
c.JSON(q.NewMsgResp(501, "Not Implemented"))
|
||||
}
|
||||
|
||||
func (h *FileHandlers) GetTmpPath(filePath string) string {
|
||||
return path.Join(UploadDir, fmt.Sprintf("%x", sha1.Sum([]byte(filePath))))
|
||||
}
|
||||
|
||||
func (h *FileHandlers) FsPath(filePath string) string {
|
||||
return path.Join(FsDir, filePath)
|
||||
}
|
85
src/handlers/fileshdr/upload_mgr.go
Normal file
85
src/handlers/fileshdr/upload_mgr.go
Normal file
|
@ -0,0 +1,85 @@
|
|||
package fileshdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/kvstore"
|
||||
)
|
||||
|
||||
var (
|
||||
isDirKey = "isDir"
|
||||
fileSizeKey = "fileSize"
|
||||
uploadedKey = "uploaded"
|
||||
filePathKey = "fileName"
|
||||
)
|
||||
|
||||
type UploadMgr struct {
|
||||
kv kvstore.IKVStore
|
||||
}
|
||||
|
||||
func NewUploadMgr(kv kvstore.IKVStore) *UploadMgr {
|
||||
return &UploadMgr{
|
||||
kv: kv,
|
||||
}
|
||||
}
|
||||
|
||||
func (um *UploadMgr) AddInfo(fileName, tmpName string, fileSize int64, isDir bool) error {
|
||||
err := um.kv.SetInt64(infoKey(tmpName, fileSizeKey), fileSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = um.kv.SetInt64(infoKey(tmpName, uploadedKey), 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return um.kv.SetString(infoKey(tmpName, filePathKey), fileName)
|
||||
}
|
||||
|
||||
func (um *UploadMgr) IncreUploaded(fileName string, newUploaded int64) error {
|
||||
fileSize, ok := um.kv.GetInt64(infoKey(fileName, fileSizeKey))
|
||||
if !ok {
|
||||
return fmt.Errorf("file size %s not found", fileName)
|
||||
}
|
||||
preUploaded, ok := um.kv.GetInt64(infoKey(fileName, uploadedKey))
|
||||
if !ok {
|
||||
return fmt.Errorf("file uploaded %s not found", fileName)
|
||||
}
|
||||
if newUploaded+preUploaded <= fileSize {
|
||||
um.kv.SetInt64(infoKey(fileName, uploadedKey), newUploaded+preUploaded)
|
||||
return nil
|
||||
}
|
||||
return errors.New("uploaded is greater than file size")
|
||||
}
|
||||
|
||||
func (um *UploadMgr) GetInfo(fileName string) (string, int64, int64, error) {
|
||||
realFilePath, ok := um.kv.GetString(infoKey(fileName, filePathKey))
|
||||
if !ok {
|
||||
return "", 0, 0, os.ErrNotExist
|
||||
}
|
||||
fileSize, ok := um.kv.GetInt64(infoKey(fileName, fileSizeKey))
|
||||
if !ok {
|
||||
return "", 0, 0, os.ErrNotExist
|
||||
}
|
||||
uploaded, ok := um.kv.GetInt64(infoKey(fileName, uploadedKey))
|
||||
if !ok {
|
||||
return "", 0, 0, os.ErrNotExist
|
||||
}
|
||||
|
||||
return realFilePath, fileSize, uploaded, nil
|
||||
}
|
||||
|
||||
func (um *UploadMgr) DelInfo(fileName string) error {
|
||||
if err := um.kv.DelInt64(infoKey(fileName, fileSizeKey)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := um.kv.DelInt64(infoKey(fileName, uploadedKey)); err != nil {
|
||||
return err
|
||||
}
|
||||
return um.kv.DelString(infoKey(fileName, filePathKey))
|
||||
}
|
||||
|
||||
func infoKey(fileName, key string) string {
|
||||
return fmt.Sprintf("%s:%s", fileName, key)
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func Login(ctx *gin.Context) {}
|
||||
|
||||
func Logout(ctx *gin.Context) {}
|
75
src/handlers/singleuserhdr/handlers.go
Normal file
75
src/handlers/singleuserhdr/handlers.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
package singleuserhdr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/ihexxa/gocfg"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/depidx"
|
||||
q "github.com/ihexxa/quickshare/src/handlers"
|
||||
)
|
||||
|
||||
var ErrInvalidUser = errors.New("invalid user name or password")
|
||||
|
||||
type SimpleUserHandlers struct {
|
||||
cfg gocfg.ICfg
|
||||
deps *depidx.Deps
|
||||
}
|
||||
|
||||
func NewSimpleUserHandlers(cfg gocfg.ICfg, deps *depidx.Deps) *SimpleUserHandlers {
|
||||
return &SimpleUserHandlers{
|
||||
cfg: cfg,
|
||||
deps: deps,
|
||||
}
|
||||
}
|
||||
|
||||
func (hdr *SimpleUserHandlers) Login(c *gin.Context) {
|
||||
userName := c.Query("username")
|
||||
pwd := c.Query("pwd")
|
||||
if userName == "" || pwd == "" {
|
||||
c.JSON(q.ErrResp(c, 400, ErrInvalidUser))
|
||||
return
|
||||
}
|
||||
|
||||
expectedName, ok1 := hdr.deps.KV().GetString("username")
|
||||
expectedPwd, ok2 := hdr.deps.KV().GetString("pwd")
|
||||
if !ok1 || !ok2 {
|
||||
c.JSON(q.ErrResp(c, 400, ErrInvalidUser))
|
||||
return
|
||||
}
|
||||
|
||||
if userName != expectedName || pwd != expectedPwd {
|
||||
c.JSON(q.ErrResp(c, 400, ErrInvalidUser))
|
||||
return
|
||||
}
|
||||
token, err := hdr.deps.Token().ToToken(map[string]string{
|
||||
"username": expectedName,
|
||||
})
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 500, err))
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: use config
|
||||
c.SetCookie("token", token, 3600, "/", "localhost", false, true)
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
||||
|
||||
func (hdr *SimpleUserHandlers) Logout(c *gin.Context) {
|
||||
token, err := c.Cookie("token")
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: // check if token expired
|
||||
_, err = hdr.deps.Token().FromToken(token, map[string]string{"token": ""})
|
||||
if err != nil {
|
||||
c.JSON(q.ErrResp(c, 400, err))
|
||||
return
|
||||
}
|
||||
|
||||
c.SetCookie("token", "", 0, "/", "localhost", false, true)
|
||||
c.JSON(q.Resp(200))
|
||||
}
|
104
src/handlers/util.go
Normal file
104
src/handlers/util.go
Normal file
|
@ -0,0 +1,104 @@
|
|||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
var statusCodes = map[int]string{
|
||||
100: "Continue", // RFC 7231, 6.2.1
|
||||
101: "SwitchingProtocols", // RFC 7231, 6.2.2
|
||||
102: "Processing", // RFC 2518, 10.1
|
||||
103: "EarlyHints", // RFC 8297
|
||||
200: "OK", // RFC 7231, 6.3.1
|
||||
201: "Created", // RFC 7231, 6.3.2
|
||||
202: "Accepted", // RFC 7231, 6.3.3
|
||||
203: "NonAuthoritativeInfo", // RFC 7231, 6.3.4
|
||||
204: "NoContent", // RFC 7231, 6.3.5
|
||||
205: "ResetContent", // RFC 7231, 6.3.6
|
||||
206: "PartialContent", // RFC 7233, 4.1
|
||||
207: "MultiStatus", // RFC 4918, 11.1
|
||||
208: "AlreadyReported", // RFC 5842, 7.1
|
||||
226: "IMUsed", // RFC 3229, 10.4.1
|
||||
300: "MultipleChoices", // RFC 7231, 6.4.1
|
||||
301: "MovedPermanently", // RFC 7231, 6.4.2
|
||||
302: "Found", // RFC 7231, 6.4.3
|
||||
303: "SeeOther", // RFC 7231, 6.4.4
|
||||
304: "NotModified", // RFC 7232, 4.1
|
||||
305: "UseProxy", // RFC 7231, 6.4.5
|
||||
307: "TemporaryRedirect", // RFC 7231, 6.4.7
|
||||
308: "PermanentRedirect", // RFC 7538, 3
|
||||
400: "BadRequest", // RFC 7231, 6.5.1
|
||||
401: "Unauthorized", // RFC 7235, 3.1
|
||||
402: "PaymentRequired", // RFC 7231, 6.5.2
|
||||
403: "Forbidden", // RFC 7231, 6.5.3
|
||||
404: "NotFound", // RFC 7231, 6.5.4
|
||||
405: "MethodNotAllowed", // RFC 7231, 6.5.5
|
||||
406: "NotAcceptable", // RFC 7231, 6.5.6
|
||||
407: "ProxyAuthRequired", // RFC 7235, 3.2
|
||||
408: "RequestTimeout", // RFC 7231, 6.5.7
|
||||
409: "Conflict", // RFC 7231, 6.5.8
|
||||
410: "Gone", // RFC 7231, 6.5.9
|
||||
411: "LengthRequired", // RFC 7231, 6.5.10
|
||||
412: "PreconditionFailed", // RFC 7232, 4.2
|
||||
413: "RequestEntityTooLarge", // RFC 7231, 6.5.11
|
||||
414: "RequestURITooLong", // RFC 7231, 6.5.12
|
||||
415: "UnsupportedMediaType", // RFC 7231, 6.5.13
|
||||
416: "RequestedRangeNotSatisfiable", // RFC 7233, 4.4
|
||||
417: "ExpectationFailed", // RFC 7231, 6.5.14
|
||||
418: "Teapot", // RFC 7168, 2.3.3
|
||||
421: "MisdirectedRequest", // RFC 7540, 9.1.2
|
||||
422: "UnprocessableEntity", // RFC 4918, 11.2
|
||||
423: "Locked", // RFC 4918, 11.3
|
||||
424: "FailedDependency", // RFC 4918, 11.4
|
||||
425: "TooEarly", // RFC 8470, 5.2.
|
||||
426: "UpgradeRequired", // RFC 7231, 6.5.15
|
||||
428: "PreconditionRequired", // RFC 6585, 3
|
||||
429: "TooManyRequests", // RFC 6585, 4
|
||||
431: "RequestHeaderFieldsTooLarge", // RFC 6585, 5
|
||||
451: "UnavailableForLegalReasons", // RFC 7725, 3
|
||||
500: "InternalServerError", // RFC 7231, 6.6.1
|
||||
501: "NotImplemented", // RFC 7231, 6.6.2
|
||||
502: "BadGateway", // RFC 7231, 6.6.3
|
||||
503: "ServiceUnavailable", // RFC 7231, 6.6.4
|
||||
504: "GatewayTimeout", // RFC 7231, 6.6.5
|
||||
505: "HTTPVersionNotSupported", // RFC 7231, 6.6.6
|
||||
506: "VariantAlsoNegotiates", // RFC 2295, 8.1
|
||||
507: "InsufficientStorage", // RFC 4918, 11.5
|
||||
508: "LoopDetected", // RFC 5842, 7.2
|
||||
510: "NotExtended", // RFC 2774, 7
|
||||
511: "NetworkAuthenticationRequired", // RFC 6585, 6
|
||||
}
|
||||
|
||||
type MsgResp struct {
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
func NewMsgResp(code int, msg string) (int, interface{}) {
|
||||
_, ok := statusCodes[code]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("status code not found %d", code))
|
||||
}
|
||||
return code, &MsgResp{Msg: msg}
|
||||
|
||||
}
|
||||
|
||||
func Resp(code int) (int, interface{}) {
|
||||
msg, ok := statusCodes[code]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("status code not found %d", code))
|
||||
}
|
||||
return code, &MsgResp{Msg: msg}
|
||||
|
||||
}
|
||||
|
||||
func ErrResp(c *gin.Context, code int, err error) (int, interface{}) {
|
||||
_, ok := statusCodes[code]
|
||||
if !ok {
|
||||
panic(fmt.Sprintf("status code not found %d", code))
|
||||
}
|
||||
gErr := c.Error(err)
|
||||
return code, gErr.JSON()
|
||||
|
||||
}
|
5
src/idgen/idgen_interface.go
Normal file
5
src/idgen/idgen_interface.go
Normal file
|
@ -0,0 +1,5 @@
|
|||
package idgen
|
||||
|
||||
type IIDGen interface {
|
||||
Gen() uint64
|
||||
}
|
27
src/idgen/simpleidgen/simple_id_gen.go
Normal file
27
src/idgen/simpleidgen/simple_id_gen.go
Normal file
|
@ -0,0 +1,27 @@
|
|||
package simpleidgen
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var lastID = uint64(0)
|
||||
var mux = &sync.Mutex{}
|
||||
|
||||
type SimpleIDGen struct{}
|
||||
|
||||
func New() *SimpleIDGen {
|
||||
return &SimpleIDGen{}
|
||||
}
|
||||
|
||||
func (id *SimpleIDGen) Gen() uint64 {
|
||||
mux.Lock()
|
||||
defer mux.Unlock()
|
||||
newID := uint64(time.Now().UnixNano())
|
||||
if newID != lastID {
|
||||
lastID = newID
|
||||
return lastID
|
||||
}
|
||||
lastID = newID + 1
|
||||
return lastID
|
||||
}
|
225
src/kvstore/boltdbpvd/provider.go
Normal file
225
src/kvstore/boltdbpvd/provider.go
Normal file
|
@ -0,0 +1,225 @@
|
|||
package boltdbpvd
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/kvstore"
|
||||
)
|
||||
|
||||
type BoltPvd struct {
|
||||
dbPath string
|
||||
db *bolt.DB
|
||||
maxStrLen int
|
||||
}
|
||||
|
||||
func New(dbPath string, maxStrLen int) *BoltPvd {
|
||||
boltPath := path.Join(path.Clean(dbPath), "quickshare.db")
|
||||
db, err := bolt.Open(boltPath, 0600, &bolt.Options{Timeout: 1 * time.Second})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
buckets := []string{"bools", "ints", "int64s", "floats", "strings", "locks"}
|
||||
for _, bucketName := range buckets {
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte(bucketName))
|
||||
if b != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := tx.CreateBucket([]byte(bucketName))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
return &BoltPvd{
|
||||
dbPath: dbPath,
|
||||
db: db,
|
||||
maxStrLen: maxStrLen,
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) Close() error {
|
||||
return bp.db.Close()
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) GetBool(key string) (bool, bool) {
|
||||
buf, ok := make([]byte, 1), false
|
||||
|
||||
bp.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("bools"))
|
||||
v := b.Get([]byte(key))
|
||||
copy(buf, v)
|
||||
ok = v != nil
|
||||
return nil
|
||||
})
|
||||
|
||||
// 1 means true, 0 means false
|
||||
return buf[0] == 1, ok
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) SetBool(key string, val bool) error {
|
||||
var bVal byte = 0
|
||||
if val {
|
||||
bVal = 1
|
||||
}
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("bools"))
|
||||
return b.Put([]byte(key), []byte{bVal})
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) DelBool(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("bools"))
|
||||
return b.Delete([]byte(key))
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) GetInt(key string) (int, bool) {
|
||||
x, ok := bp.GetInt64(key)
|
||||
return int(x), ok
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) SetInt(key string, val int) error {
|
||||
return bp.SetInt64(key, int64(val))
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) DelInt(key string) error {
|
||||
return bp.DelInt64(key)
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) GetInt64(key string) (int64, bool) {
|
||||
buf, ok := make([]byte, binary.MaxVarintLen64), false
|
||||
|
||||
bp.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("int64s"))
|
||||
v := b.Get([]byte(key))
|
||||
copy(buf, v)
|
||||
ok = v != nil
|
||||
return nil
|
||||
})
|
||||
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
x, n := binary.Varint(buf)
|
||||
if n < 0 {
|
||||
return 0, false
|
||||
}
|
||||
return x, true
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) SetInt64(key string, val int64) error {
|
||||
buf := make([]byte, binary.MaxVarintLen64)
|
||||
n := binary.PutVarint(buf, val)
|
||||
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("int64s"))
|
||||
return b.Put([]byte(key), buf[:n])
|
||||
})
|
||||
}
|
||||
func (bp *BoltPvd) DelInt64(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("int64s"))
|
||||
return b.Delete([]byte(key))
|
||||
})
|
||||
}
|
||||
|
||||
func float64ToBytes(num float64) []byte {
|
||||
buf := make([]byte, 64)
|
||||
binary.PutUvarint(buf, math.Float64bits(num))
|
||||
return buf
|
||||
}
|
||||
|
||||
func bytesToFloat64(buf []byte) float64 {
|
||||
uintVal, _ := binary.Uvarint(buf[:64])
|
||||
return math.Float64frombits(uintVal)
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) GetFloat(key string) (float64, bool) {
|
||||
buf, ok := make([]byte, 64), false
|
||||
bp.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("floats"))
|
||||
v := b.Get([]byte(key))
|
||||
copy(buf, v)
|
||||
ok = v != nil
|
||||
return nil
|
||||
})
|
||||
if !ok {
|
||||
return 0.0, false
|
||||
}
|
||||
return bytesToFloat64(buf), true
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) SetFloat(key string, val float64) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("floats"))
|
||||
return b.Put([]byte(key), float64ToBytes(val))
|
||||
})
|
||||
}
|
||||
func (bp *BoltPvd) DelFloat(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("floats"))
|
||||
return b.Delete([]byte(key))
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) GetString(key string) (string, bool) {
|
||||
buf, ok, length := make([]byte, bp.maxStrLen), false, bp.maxStrLen
|
||||
bp.db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("strings"))
|
||||
v := b.Get([]byte(key))
|
||||
length = copy(buf, v)
|
||||
ok = v != nil
|
||||
return nil
|
||||
})
|
||||
return string(buf[:length]), ok
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) SetString(key string, val string) error {
|
||||
if len(val) > bp.maxStrLen {
|
||||
return fmt.Errorf("can not set string value longer than %d", bp.maxStrLen)
|
||||
}
|
||||
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("strings"))
|
||||
return b.Put([]byte(key), []byte(val))
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) DelString(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("strings"))
|
||||
return b.Delete([]byte(key))
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) TryLock(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("locks"))
|
||||
if b.Get([]byte(key)) != nil {
|
||||
return kvstore.ErrLocked
|
||||
}
|
||||
return b.Put([]byte(key), []byte{})
|
||||
})
|
||||
}
|
||||
|
||||
func (bp *BoltPvd) Unlock(key string) error {
|
||||
return bp.db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("locks"))
|
||||
if b.Get([]byte(key)) != nil {
|
||||
return b.Delete([]byte(key))
|
||||
}
|
||||
return kvstore.ErrNoLock
|
||||
})
|
||||
}
|
26
src/kvstore/kvstore_interface.go
Normal file
26
src/kvstore/kvstore_interface.go
Normal file
|
@ -0,0 +1,26 @@
|
|||
package kvstore
|
||||
|
||||
import "errors"
|
||||
|
||||
var ErrLocked = errors.New("already locked")
|
||||
var ErrNoLock = errors.New("no lock to unlock")
|
||||
|
||||
type IKVStore interface {
|
||||
GetBool(key string) (bool, bool)
|
||||
SetBool(key string, val bool) error
|
||||
DelBool(key string) error
|
||||
GetInt(key string) (int, bool)
|
||||
SetInt(key string, val int) error
|
||||
DelInt(key string) error
|
||||
GetInt64(key string) (int64, bool)
|
||||
SetInt64(key string, val int64) error
|
||||
DelInt64(key string) error
|
||||
GetFloat(key string) (float64, bool)
|
||||
SetFloat(key string, val float64) error
|
||||
DelFloat(key string) error
|
||||
GetString(key string) (string, bool)
|
||||
SetString(key string, val string) error
|
||||
DelString(key string) error
|
||||
TryLock(key string) error
|
||||
Unlock(key string) error
|
||||
}
|
166
src/kvstore/memstore/provider.go
Normal file
166
src/kvstore/memstore/provider.go
Normal file
|
@ -0,0 +1,166 @@
|
|||
package memstore
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/ihexxa/quickshare/src/kvstore"
|
||||
)
|
||||
|
||||
type MemStore struct {
|
||||
bools map[string]bool
|
||||
boolsMtx *sync.Mutex
|
||||
ints map[string]int
|
||||
intsMtx *sync.Mutex
|
||||
int64s map[string]int64
|
||||
int64sMtx *sync.Mutex
|
||||
floats map[string]float64
|
||||
floatsMtx *sync.Mutex
|
||||
strings map[string]string
|
||||
stringsMtx *sync.Mutex
|
||||
locks map[string]bool
|
||||
locksMtx *sync.Mutex
|
||||
}
|
||||
|
||||
func New() *MemStore {
|
||||
return &MemStore{
|
||||
bools: map[string]bool{},
|
||||
boolsMtx: &sync.Mutex{},
|
||||
ints: map[string]int{},
|
||||
intsMtx: &sync.Mutex{},
|
||||
int64s: map[string]int64{},
|
||||
int64sMtx: &sync.Mutex{},
|
||||
floats: map[string]float64{},
|
||||
floatsMtx: &sync.Mutex{},
|
||||
strings: map[string]string{},
|
||||
stringsMtx: &sync.Mutex{},
|
||||
locks: map[string]bool{},
|
||||
locksMtx: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (st *MemStore) GetBool(key string) (bool, bool) {
|
||||
st.boolsMtx.Lock()
|
||||
defer st.boolsMtx.Unlock()
|
||||
val, ok := st.bools[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (st *MemStore) SetBool(key string, val bool) error {
|
||||
st.boolsMtx.Lock()
|
||||
defer st.boolsMtx.Unlock()
|
||||
st.bools[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) GetInt(key string) (int, bool) {
|
||||
st.intsMtx.Lock()
|
||||
defer st.intsMtx.Unlock()
|
||||
val, ok := st.ints[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (st *MemStore) SetInt(key string, val int) error {
|
||||
st.intsMtx.Lock()
|
||||
defer st.intsMtx.Unlock()
|
||||
st.ints[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) GetInt64(key string) (int64, bool) {
|
||||
st.int64sMtx.Lock()
|
||||
defer st.int64sMtx.Unlock()
|
||||
val, ok := st.int64s[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (st *MemStore) SetInt64(key string, val int64) error {
|
||||
st.int64sMtx.Lock()
|
||||
defer st.int64sMtx.Unlock()
|
||||
st.int64s[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) GetFloat(key string) (float64, bool) {
|
||||
st.floatsMtx.Lock()
|
||||
defer st.floatsMtx.Unlock()
|
||||
val, ok := st.floats[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (st *MemStore) SetFloat(key string, val float64) error {
|
||||
st.floatsMtx.Lock()
|
||||
defer st.floatsMtx.Unlock()
|
||||
st.floats[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) GetString(key string) (string, bool) {
|
||||
st.stringsMtx.Lock()
|
||||
defer st.stringsMtx.Unlock()
|
||||
val, ok := st.strings[key]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (st *MemStore) SetString(key string, val string) error {
|
||||
st.stringsMtx.Lock()
|
||||
defer st.stringsMtx.Unlock()
|
||||
st.strings[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) DelBool(key string) error {
|
||||
st.boolsMtx.Lock()
|
||||
defer st.boolsMtx.Unlock()
|
||||
delete(st.bools, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) DelInt(key string) error {
|
||||
st.intsMtx.Lock()
|
||||
defer st.intsMtx.Unlock()
|
||||
delete(st.ints, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) DelInt64(key string) error {
|
||||
st.int64sMtx.Lock()
|
||||
defer st.int64sMtx.Unlock()
|
||||
delete(st.int64s, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) DelFloat(key string) error {
|
||||
st.floatsMtx.Lock()
|
||||
defer st.floatsMtx.Unlock()
|
||||
delete(st.floats, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) DelString(key string) error {
|
||||
st.stringsMtx.Lock()
|
||||
defer st.stringsMtx.Unlock()
|
||||
delete(st.strings, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) TryLock(key string) error {
|
||||
st.stringsMtx.Lock()
|
||||
defer st.stringsMtx.Unlock()
|
||||
_, ok := st.locks[key]
|
||||
if ok {
|
||||
return kvstore.ErrLocked
|
||||
}
|
||||
st.locks[key] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *MemStore) Unlock(key string) error {
|
||||
st.stringsMtx.Lock()
|
||||
defer st.stringsMtx.Unlock()
|
||||
_, ok := st.locks[key]
|
||||
if !ok {
|
||||
return kvstore.ErrNoLock
|
||||
}
|
||||
delete(st.locks, key)
|
||||
return nil
|
||||
}
|
185
src/kvstore/test/provider_test.go
Normal file
185
src/kvstore/test/provider_test.go
Normal file
|
@ -0,0 +1,185 @@
|
|||
package test

import (
    "fmt"
    "io/ioutil"
    "os"
    "testing"

    "github.com/ihexxa/quickshare/src/kvstore"
    "github.com/ihexxa/quickshare/src/kvstore/boltdbpvd"
    "github.com/ihexxa/quickshare/src/kvstore/memstore"
)

func TestKVStoreProviders(t *testing.T) {
    var err error
    var ok bool
    key, boolV, intV, int64V, floatV, stringV := "key", true, 2027, int64(2027), 3.1415, "foobar"

    kvstoreTest := func(store kvstore.IKVStore, t *testing.T) {
        // test bools
        _, ok = store.GetBool(key)
        if ok {
            t.Error("value should not exist")
        }
        err = store.SetBool(key, boolV)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        boolVGot, ok := store.GetBool(key)
        if !ok {
            t.Error("value should exist")
        } else if boolVGot != boolV {
            t.Error(fmt.Sprintln("value not equal", boolVGot, boolV))
        }
        err = store.DelBool(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        _, ok = store.GetBool(key)
        if ok {
            t.Error("value should not exist")
        }

        // test ints
        _, ok = store.GetInt(key)
        if ok {
            t.Error("value should not exist")
        }
        err = store.SetInt(key, intV)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        intVGot, ok := store.GetInt(key)
        if !ok {
            t.Error("value should exist")
        } else if intVGot != intV {
            t.Error(fmt.Sprintln("value not equal", intVGot, intV))
        }
        err = store.DelInt(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        _, ok = store.GetInt(key)
        if ok {
            t.Error("value should not exist")
        }

        // test int64s
        _, ok = store.GetInt64(key)
        if ok {
            t.Error("value should not exist")
        }
        err = store.SetInt64(key, int64V)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        int64VGot, ok := store.GetInt64(key)
        if !ok {
            t.Error("value should exist")
        } else if int64VGot != int64V {
            t.Error(fmt.Sprintln("value not equal", int64VGot, int64V))
        }
        err = store.DelInt64(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        _, ok = store.GetInt64(key)
        if ok {
            t.Error("value should not exist")
        }

        // test floats
        _, ok = store.GetFloat(key)
        if ok {
            t.Error("value should not exist")
        }
        err = store.SetFloat(key, floatV)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        floatVGot, ok := store.GetFloat(key)
        if !ok {
            t.Error("value should exist")
        } else if floatVGot != floatV {
            t.Error(fmt.Sprintln("value not equal", floatVGot, floatV))
        }
        err = store.DelFloat(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        _, ok = store.GetFloat(key)
        if ok {
            t.Error("value should not exist")
        }

        // test strings
        _, ok = store.GetString(key)
        if ok {
            t.Error("value should not exist")
        }
        err = store.SetString(key, stringV)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        stringVGot, ok := store.GetString(key)
        if !ok {
            t.Error("value should exist")
        } else if stringVGot != stringV {
            t.Error(fmt.Sprintln("value not equal", stringVGot, stringV))
        }
        err = store.DelString(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        _, ok = store.GetString(key)
        if ok {
            t.Error("value should not exist")
        }

        // test locks
        err = store.TryLock(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        err = store.TryLock(key)
        if err == nil || err != kvstore.ErrLocked {
            t.Error("the key should be locked")
        }
        err = store.TryLock("key2")
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        err = store.Unlock(key)
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
        err = store.Unlock("key2")
        if err != nil {
            t.Errorf("there should be no error %v", err)
        }
    }

    t.Run("test bolt provider", func(t *testing.T) {
        rootPath, err := ioutil.TempDir("./", "quickshare_kvstore_test_")
        if err != nil {
            t.Fatal(err)
        }
        defer os.RemoveAll(rootPath)

        store := boltdbpvd.New(rootPath, 1024)
        defer store.Close()
        kvstoreTest(store, t)
    })

    t.Run("test in-memory provider", func(t *testing.T) {
        rootPath, err := ioutil.TempDir("./", "quickshare_kvstore_test_")
        if err != nil {
            t.Fatal(err)
        }
        defer os.RemoveAll(rootPath)

        store := memstore.New()
        kvstoreTest(store, t)
    })
}
9
src/logging/log_interface.go
Normal file

@@ -0,0 +1,9 @@
package logging

type ILogger interface {
    Debug()
    Log(values ...interface{})
    Logf(pattern string, values ...interface{})
    Error(values ...interface{})
    Errorf(pattern string, values ...interface{})
}
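Components that only need logging can depend on this interface instead of a concrete logger, so the implementation can be swapped later. A minimal sketch of that pattern follows; the greet function and the main wrapper are illustrative and not part of this commit.

package main

import (
    "github.com/ihexxa/quickshare/src/logging"
    "github.com/ihexxa/quickshare/src/logging/simplelog"
)

// greet depends only on the ILogger interface, so any implementation
// (simplelog today, a structured logger later) can be injected.
func greet(name string, log logging.ILogger) {
    log.Logf("hello %s\n", name)
}

func main() {
    greet("quickshare", simplelog.NewSimpleLogger())
}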
32
src/logging/simplelog/simple_log.go
Normal file

@@ -0,0 +1,32 @@
package simplelog

import (
    "fmt"
    "log"
)

type SimpleLogger struct {
    debug bool
}

func NewSimpleLogger() *SimpleLogger {
    return &SimpleLogger{}
}

func (l *SimpleLogger) Debug() {
    l.debug = true
}

func (l *SimpleLogger) Log(values ...interface{}) {
    log.Println(values...)
}

func (l *SimpleLogger) Logf(pattern string, values ...interface{}) {
    log.Printf(pattern, values...)
}

func (l *SimpleLogger) Error(values ...interface{}) {
    log.Println(append([]interface{}{"error:"}, values...)...)
}

func (l *SimpleLogger) Errorf(pattern string, values ...interface{}) {
    log.Printf(fmt.Sprintf("error: %s", pattern), values...)
}
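Since SimpleLogger is meant to back the ILogger interface above, a compile-time assertion is a cheap way to keep the two in sync. The var declaration below is a sketch of that check, not part of this commit.

package simplelog

import "github.com/ihexxa/quickshare/src/logging"

// Fails to compile if *SimpleLogger stops satisfying logging.ILogger.
var _ logging.ILogger = (*SimpleLogger)(nil)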
49
src/server/config.go
Normal file

@@ -0,0 +1,49 @@
package server

type FSConfig struct {
    Root       string `json:"root"`
    OpensLimit int    `json:"opensLimit"`
    OpenTTL    int    `json:"openTTL"`
}

type Secrets struct {
    TokenSecret string `json:"tokenSecret" cfg:"env"`
}

type ServerCfg struct {
    ProdMode       bool   `json:"prodMode"`
    Addr           string `json:"addr"`
    ReadTimeout    int    `json:"readTimeout"`
    WriteTimeout   int    `json:"writeTimeout"`
    MaxHeaderBytes int    `json:"maxHeaderBytes"`
}

type Config struct {
    Fs      *FSConfig  `json:"fs"`
    Secrets *Secrets   `json:"secrets"`
    Server  *ServerCfg `json:"server"`
}

func NewEmptyConfig() *Config {
    return &Config{}
}

func NewDefaultConfig() *Config {
    return &Config{
        Fs: &FSConfig{
            Root:       ".",
            OpensLimit: 128,
            OpenTTL:    60, // 1 min
        },
        Secrets: &Secrets{
            TokenSecret: "",
        },
        Server: &ServerCfg{
            ProdMode:       true,
            Addr:           "127.0.0.1:8888",
            ReadTimeout:    2000,
            WriteTimeout:   2000,
            MaxHeaderBytes: 512,
        },
    }
}
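These structs are consumed through gocfg: the defaults from NewDefaultConfig are loaded first and a JSON document can override individual keys, which is how startTestServer builds its config later in this commit. A sketch of that pattern is below; the override values and the main wrapper are illustrative only.

package main

import (
    "fmt"

    "github.com/ihexxa/gocfg"
    "github.com/ihexxa/quickshare/src/server"
)

func main() {
    // Defaults come from NewDefaultConfig; the JSON string layered on top
    // overrides individual keys (the values here are illustrative).
    cfg, err := gocfg.New(server.NewDefaultConfig()).
        Load(gocfg.JSONStr(`{"Server": {"ProdMode": false}, "FS": {"Root": "./testData"}}`))
    if err != nil {
        panic(err)
    }
    // Keys are addressed by their Go field path, e.g. "Server.Addr".
    fmt.Println(cfg.GrabString("Server.Addr"))
}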
BIN
src/server/quickshare.db
Normal file
Binary file not shown.

@@ -1,67 +1,149 @@
package server

import (
    "context"
    "crypto/rand"
    "fmt"
    "net/http"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/ihexxa/gocfg"

    "github.com/ihexxa/quickshare/src/handlers"
    "github.com/ihexxa/quickshare/src/cryptoutil/jwt"
    "github.com/ihexxa/quickshare/src/depidx"
    "github.com/ihexxa/quickshare/src/fs"
    "github.com/ihexxa/quickshare/src/fs/local"
    "github.com/ihexxa/quickshare/src/handlers/fileshdr"
    "github.com/ihexxa/quickshare/src/handlers/singleuserhdr"
    "github.com/ihexxa/quickshare/src/idgen/simpleidgen"
    "github.com/ihexxa/quickshare/src/kvstore"
    "github.com/ihexxa/quickshare/src/kvstore/boltdbpvd"
    "github.com/ihexxa/quickshare/src/logging/simplelog"
    "github.com/ihexxa/quickshare/src/uploadmgr"
)

type Server struct {
    server *http.Server
}

type ServerCfg struct {
    Addr string `json:"addr"`
    ReadTimeout int `json:"readTimeout"`
    WriteTimeout int `json:"writeTimeout"`
    MaxHeaderBytes int `json:"maxHeaderBytes"`
    deps *depidx.Deps
}

func NewServer(cfg gocfg.ICfg) (*Server, error) {
    // gin.SetMode(gin.ReleaseMode)
    deps := initDeps(cfg)

    if cfg.BoolOr("Server.ProdMode", true) {
        gin.SetMode(gin.ReleaseMode)
    }

    router := gin.Default()
    router, err := addHandlers(router)
    router, err := addHandlers(router, cfg, deps)
    if err != nil {
        return nil, err
    }

    srv := &http.Server{
        // TODO: set more options
        Addr: cfg.StringOr("ServerCfg.Addr", ":8080"),
        Addr: cfg.GrabString("Server.Addr"),
        Handler: router,
        ReadTimeout: time.Duration(cfg.IntOr("ServerCfg.ReadTimeout", 1)) * time.Second,
        WriteTimeout: time.Duration(cfg.IntOr("ServerCfg.ReadTimeout", 1)) * time.Second,
        MaxHeaderBytes: cfg.IntOr("ServerCfg.MaxHeaderBytes", 512),
        ReadTimeout: time.Duration(cfg.GrabInt("Server.ReadTimeout")) * time.Millisecond,
        WriteTimeout: time.Duration(cfg.GrabInt("Server.WriteTimeout")) * time.Millisecond,
        MaxHeaderBytes: cfg.GrabInt("Server.MaxHeaderBytes"),
    }

    return &Server{
        server: srv,
        deps: deps,
    }, nil
}

func addHandlers(router *gin.Engine) (*gin.Engine, error) {
func (s *Server) depsFS() fs.ISimpleFS {
    return s.deps.FS()
}

func (s *Server) depsKVStore() kvstore.IKVStore {
    return s.deps.KV()
}

func makeRandToken() string {
    b := make([]byte, 32)
    _, err := rand.Read(b)
    if err != nil {
        panic(err)
    }
    return string(b)
}

func initDeps(cfg gocfg.ICfg) *depidx.Deps {
    secret, ok := cfg.String("ENV.TOKENSECRET")
    if !ok {
        secret = makeRandToken()
        fmt.Println("warning: TOKENSECRET is not given, using generated token")
    }

    rootPath := cfg.GrabString("Fs.Root")
    opensLimit := cfg.GrabInt("Fs.OpensLimit")
    openTTL := cfg.GrabInt("Fs.OpenTTL")

    filesystem := local.NewLocalFS(rootPath, 0660, opensLimit, openTTL)
    jwtEncDec := jwt.NewJWTEncDec(secret)
    logger := simplelog.NewSimpleLogger()
    kv := boltdbpvd.New(".", 1024)
    ider := simpleidgen.New()

    deps := depidx.NewDeps(cfg)
    deps.SetFS(filesystem)
    deps.SetToken(jwtEncDec)
    deps.SetLog(logger)
    deps.SetKV(kv)
    deps.SetID(ider)

    uploadMgr, err := uploadmgr.NewUploadMgr(deps)
    if err != nil {
        panic(err)
    }
    deps.SetUploader(uploadMgr)

    return deps
}

func addHandlers(router *gin.Engine, cfg gocfg.ICfg, deps *depidx.Deps) (*gin.Engine, error) {
    v1 := router.Group("/v1")

    users := v1.Group("/users")
    users.POST("/login", handlers.Login)
    users.POST("/logout", handlers.Logout)
    userHdrs := singleuserhdr.NewSimpleUserHandlers(cfg, deps)
    users.POST("/login", userHdrs.Login)
    users.POST("/logout", userHdrs.Logout)

    files := v1.Group("files")
    files.POST("/upload/", handlers.Upload)
    files.GET("/list/", handlers.List)
    files.DELETE("/delete/", handlers.Delete)
    files.GET("/metadata/", handlers.Metadata)
    files.POST("/copy/", handlers.Copy)
    files.POST("/move/", handlers.Move)
    filesSvc := v1.Group("/fs")
    fileHdrs, err := fileshdr.NewFileHandlers(cfg, deps)
    if err != nil {
        panic(err)
    }
    filesSvc.POST("/files", fileHdrs.Create)
    filesSvc.DELETE("/files", fileHdrs.Delete)
    filesSvc.GET("/files", fileHdrs.Download)
    filesSvc.PATCH("/files/chunks", fileHdrs.UploadChunk)
    filesSvc.GET("/files/chunks", fileHdrs.UploadStatus)
    filesSvc.PATCH("/files/copy", fileHdrs.Copy)
    filesSvc.PATCH("/files/move", fileHdrs.Move)

    filesSvc.GET("/dirs", fileHdrs.List)
    filesSvc.POST("/dirs", fileHdrs.Mkdir)
    // files.POST("/dirs/copy", fileHdrs.CopyDir)

    filesSvc.GET("/metadata", fileHdrs.Metadata)

    return router, nil
}

func (s *Server) Start() error {
    return s.server.ListenAndServe()
    err := s.server.ListenAndServe()
    if err != http.ErrServerClosed {
        return err
    }
    return nil
}

func (s *Server) Shutdown() error {
    // TODO: add timeout
    return s.server.Shutdown(context.Background())
}
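Taken together, NewServer, Start, and Shutdown cover the whole lifecycle. A minimal entry point might look like the sketch below; it mirrors what startTestServer does further down, and the package main wrapper and empty JSON override are illustrative, not part of this commit.

package main

import (
    "github.com/ihexxa/gocfg"
    "github.com/ihexxa/quickshare/src/server"
)

func main() {
    // Layer an (empty) JSON override over the defaults, then start the server.
    // A real entry point would log and exit instead of panicking.
    cfg, err := gocfg.New(server.NewDefaultConfig()).Load(gocfg.JSONStr(`{}`))
    if err != nil {
        panic(err)
    }

    srv, err := server.NewServer(cfg)
    if err != nil {
        panic(err)
    }
    defer srv.Shutdown()

    // Start blocks until the listener is closed; ErrServerClosed is swallowed.
    if err := srv.Start(); err != nil {
        panic(err)
    }
}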
250
src/server/server_files_test.go
Normal file

@@ -0,0 +1,250 @@
package server

import (
    "crypto/sha1"
    "fmt"
    "os"
    "path"
    "path/filepath"
    "testing"
    "time"

    "github.com/ihexxa/gocfg"
    "github.com/ihexxa/quickshare/src/client"
    "github.com/ihexxa/quickshare/src/handlers/fileshdr"
)

func startTestServer(config string) *Server {
    cfg, err := gocfg.New(NewDefaultConfig()).
        Load(gocfg.JSONStr(config))
    if err != nil {
        panic(err)
    }

    srv, err := NewServer(cfg)
    if err != nil {
        panic(err)
    }

    go srv.Start()
    return srv
}

func TestFileHandlers(t *testing.T) {
    addr := "http://127.0.0.1:8888"
    root := "./testData"
    chunkSize := 2
    config := `{
        "Server": {
            "ProdMode": false
        },
        "FS": {
            "Root": "./testData"
        }
    }`

    srv := startTestServer(config)
    defer srv.Shutdown()
    // kv := srv.depsKVStore()
    fs := srv.depsFS()
    defer os.RemoveAll(root)
    cl := client.NewQSClient(addr)

    // TODO: remove this; wait for the server to start listening
    time.Sleep(500 * time.Millisecond)

    t.Run("test file APIs: Create-UploadChunk-UploadStatus-Metadata-Delete", func(t *testing.T) {
        for filePath, content := range map[string]string{
            "path1/f1.md":       "11111",
            "path1/path2/f2.md": "101010",
        } {
            fileSize := int64(len([]byte(content)))
            // create a file
            res, _, errs := cl.Create(filePath, fileSize)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }

            // check uploading file
            uploadFilePath := path.Join(fileshdr.UploadDir, fmt.Sprintf("%x", sha1.Sum([]byte(filePath))))
            info, err := fs.Stat(uploadFilePath)
            if err != nil {
                t.Fatal(err)
            } else if info.Name() != filepath.Base(uploadFilePath) {
                t.Fatal(info.Name(), filepath.Base(uploadFilePath))
            }

            // upload a chunk
            i := 0
            contentBytes := []byte(content)
            for i < len(contentBytes) {
                right := i + chunkSize
                if right > len(contentBytes) {
                    right = len(contentBytes)
                }

                res, _, errs = cl.UploadChunk(filePath, string(contentBytes[i:right]), int64(i))
                i = right
                if len(errs) > 0 {
                    t.Fatal(errs)
                } else if res.StatusCode != 200 {
                    t.Fatal(res.StatusCode)
                }

                if int64(i) != fileSize {
                    _, statusResp, errs := cl.UploadStatus(filePath)
                    if len(errs) > 0 {
                        t.Fatal(errs)
                    } else if statusResp.Path != filePath ||
                        statusResp.IsDir ||
                        statusResp.FileSize != fileSize ||
                        statusResp.Uploaded != int64(i) {
                        t.Fatal("incorrect upload status info", statusResp)
                    }
                }
            }

            // check uploaded file
            fsFilePath := filepath.Join(fileshdr.FsDir, filePath)
            info, err = fs.Stat(fsFilePath)
            if err != nil {
                t.Fatal(err)
            } else if info.Name() != filepath.Base(fsFilePath) {
                t.Fatal(info.Name(), filepath.Base(fsFilePath))
            }

            // metadata
            _, mRes, errs := cl.Metadata(filePath)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if mRes.Name != info.Name() ||
                mRes.IsDir != info.IsDir() ||
                mRes.Size != info.Size() {
                // TODO: modTime is not checked
                t.Fatal("incorrect uploaded info", mRes)
            }

            // delete file
            res, _, errs = cl.Delete(filePath)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }
        }
    })

    t.Run("test file APIs: Mkdir-Create-UploadChunk-List", func(t *testing.T) {
        for dirPath, files := range map[string]map[string]string{
            "dir/path1/": map[string]string{
                "f1.md": "11111",
                "f2.md": "22222222222",
            },
            "dir/path1/path2": map[string]string{
                "f3.md": "3333333",
            },
        } {
            res, _, errs := cl.Mkdir(dirPath)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }

            for fileName, content := range files {
                filePath := filepath.Join(dirPath, fileName)

                fileSize := int64(len([]byte(content)))
                // create a file
                res, _, errs := cl.Create(filePath, fileSize)
                if len(errs) > 0 {
                    t.Fatal(errs)
                } else if res.StatusCode != 200 {
                    t.Fatal(res.StatusCode)
                }

                res, _, errs = cl.UploadChunk(filePath, content, 0)
                if len(errs) > 0 {
                    t.Fatal(errs)
                } else if res.StatusCode != 200 {
                    t.Fatal(res.StatusCode)
                }
            }

            _, lResp, errs := cl.List(dirPath)
            if len(errs) > 0 {
                t.Fatal(errs)
            }
            for _, metadata := range lResp.Metadatas {
                content, ok := files[metadata.Name]
                if !ok {
                    t.Fatalf("%s not found", metadata.Name)
                } else if int64(len(content)) != metadata.Size {
                    t.Fatalf("size not match %d %d \n", len(content), metadata.Size)
                }
            }
        }
    })

    t.Run("test file APIs: Mkdir-Create-UploadChunk-Move-List", func(t *testing.T) {
        srcDir := "move/src"
        dstDir := "move/dst"

        for _, dirPath := range []string{srcDir, dstDir} {
            res, _, errs := cl.Mkdir(dirPath)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }
        }

        files := map[string]string{
            "f1.md": "111",
            "f2.md": "22222",
        }

        for fileName, content := range files {
            oldPath := filepath.Join(srcDir, fileName)
            newPath := filepath.Join(dstDir, fileName)
            fileSize := int64(len([]byte(content)))

            // create a file
            res, _, errs := cl.Create(oldPath, fileSize)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }

            res, _, errs = cl.UploadChunk(oldPath, content, 0)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }

            res, _, errs = cl.Move(oldPath, newPath)
            if len(errs) > 0 {
                t.Fatal(errs)
            } else if res.StatusCode != 200 {
                t.Fatal(res.StatusCode)
            }
        }

        _, lResp, errs := cl.List(dstDir)
        if len(errs) > 0 {
            t.Fatal(errs)
        }
        for _, metadata := range lResp.Metadatas {
            content, ok := files[metadata.Name]
            if !ok {
                t.Fatalf("%s not found", metadata.Name)
            } else if int64(len(content)) != metadata.Size {
                t.Fatalf("size not match %d %d \n", len(content), metadata.Size)
            }
        }
    })
}
110
src/uploadmgr/mgr.go
Normal file

@@ -0,0 +1,110 @@
package uploadmgr

import (
    "errors"
    "fmt"
    "path"

    "github.com/ihexxa/quickshare/src/depidx"
)

// TODO:
// uploading resumption test
// rename file after uploaded
// differentiate file and dir

var ErrBadData = errors.New("file size or uploaded not found for a file")
var ErrUploaded = errors.New("file already uploaded")
var ErrWriteUploaded = errors.New("try to write an already acknowledged part")

type UploadMgr struct {
    deps *depidx.Deps
}

func NewUploadMgr(deps *depidx.Deps) (*UploadMgr, error) {
    if deps.KV() == nil {
        return nil, errors.New("kvstore is not found in deps")
    }
    if deps.FS() == nil {
        return nil, errors.New("fs is not found in deps")
    }

    return &UploadMgr{
        deps: deps,
    }, nil
}

func fileSizeKey(filePath string) string     { return fmt.Sprintf("%s:size", filePath) }
func fileUploadedKey(filePath string) string { return fmt.Sprintf("%s:uploaded", filePath) }

func (mgr *UploadMgr) Create(filePath string, size int64) error {
    // _, found := mgr.deps.KV().GetBool(filePath)
    // if found {
    //     return os.ErrExist
    // }

    dirPath := path.Dir(filePath)
    if dirPath != "" {
        err := mgr.deps.FS().MkdirAll(dirPath)
        if err != nil {
            return err
        }
    }

    err := mgr.deps.FS().Create(filePath)
    if err != nil {
        return err
    }

    // mgr.deps.KV().SetBool(filePath, true)
    // mgr.deps.KV().SetInt64(fileSizeKey(filePath), size)
    // mgr.deps.KV().SetInt64(fileUploadedKey(filePath), 0)
    return nil
}

func (mgr *UploadMgr) WriteChunk(filePath string, chunk []byte, off int64) (int, error) {
    // _, found := mgr.deps.KV().GetBool(filePath)
    // if !found {
    //     return 0, os.ErrNotExist
    // }

    // fileSize, ok1 := mgr.deps.KV().GetInt64(fileSizeKey(filePath))
    // uploaded, ok2 := mgr.deps.KV().GetInt64(fileUploadedKey(filePath))
    // if !ok1 || !ok2 {
    //     return 0, ErrBadData
    // } else if uploaded == fileSize {
    //     return 0, ErrUploaded
    // } else if off != uploaded {
    //     return 0, ErrWriteUploaded
    // }

    wrote, err := mgr.deps.FS().WriteAt(filePath, chunk, off)
    if err != nil {
        return wrote, err
    }

    // mgr.deps.KV().SetInt64(fileUploadedKey(filePath), off+int64(wrote))
    return wrote, nil
}

func (mgr *UploadMgr) Status(filePath string) (int64, bool, error) {
    // _, found := mgr.deps.KV().GetBool(filePath)
    // if !found {
    //     return 0, false, os.ErrNotExist
    // }

    fileSize, ok1 := mgr.deps.KV().GetInt64(fileSizeKey(filePath))
    fileUploaded, ok2 := mgr.deps.KV().GetInt64(fileUploadedKey(filePath))
    if !ok1 || !ok2 {
        return 0, false, ErrBadData
    }
    return fileUploaded, fileSize == fileUploaded, nil
}

func (mgr *UploadMgr) Close() error {
    return mgr.deps.FS().Close()
}

func (mgr *UploadMgr) Sync() error {
    return mgr.deps.FS().Sync()
}
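Note that Status reads fileSizeKey and fileUploadedKey, but the only writes to those keys are the commented-out lines in Create and WriteChunk, so with the bookkeeping disabled Status always returns ErrBadData. The sketch below shows what re-enabling that bookkeeping for Create could look like, drawn from the commented-out lines above; the createBookkeeping helper name is hypothetical and not part of this commit.

// createBookkeeping is a hypothetical helper, sketching the KV writes that
// Status expects to have happened: record the target size and mark zero bytes
// as uploaded right after Create.
func (mgr *UploadMgr) createBookkeeping(filePath string, size int64) error {
    if err := mgr.deps.KV().SetInt64(fileSizeKey(filePath), size); err != nil {
        return err
    }
    // Nothing has been uploaded yet right after Create.
    return mgr.deps.KV().SetInt64(fileUploadedKey(filePath), 0)
}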
155
src/uploadmgr/mgr_test.go
Normal file

@@ -0,0 +1,155 @@
package uploadmgr

import (
    "flag"
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "sync"
    "testing"

    "github.com/ihexxa/gocfg"
    "github.com/ihexxa/quickshare/src/depidx"
    "github.com/ihexxa/quickshare/src/fs"
    "github.com/ihexxa/quickshare/src/fs/local"
    "github.com/ihexxa/quickshare/src/kvstore/memstore"
    "github.com/ihexxa/quickshare/src/server"
)

var debug = flag.Bool("d", false, "debug mode")

// TODO: teardown after each test case

func TestUploadMgr(t *testing.T) {
    rootPath, err := ioutil.TempDir("./", "quickshare_test_")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(rootPath)

    newTestUploadMgr := func() (*UploadMgr, fs.ISimpleFS) {
        cfg := gocfg.New()
        err := cfg.Load(gocfg.JSONStr("{}"), server.NewEmptyConfig())
        if err != nil {
            t.Fatal(err)
        }

        filesystem := local.NewLocalFS(rootPath, 0660, 32, 10)
        kvstore := memstore.New()

        deps := depidx.NewDeps(cfg)
        deps.SetFS(filesystem)
        deps.SetKV(kvstore)

        mgr, err := NewUploadMgr(deps)
        if err != nil {
            t.Fatal(err)
        }
        return mgr, filesystem
    }

    t.Run("normal upload", func(t *testing.T) {
        mgr, _ := newTestUploadMgr()
        defer mgr.Close()

        testCases := map[string]string{
            "foo.md":          "",
            "bar.md":          "1",
            "path1/foobar.md": "1110011",
        }

        for filePath, content := range testCases {
            err = mgr.Create(filePath, int64(len([]byte(content))))
            if err != nil {
                t.Fatal(err)
            }

            bytes := []byte(content)
            for i := 0; i < len(bytes); i++ {
                wrote, err := mgr.WriteChunk(filePath, bytes[i:i+1], int64(i))
                if err != nil {
                    t.Fatal(err)
                }
                if wrote != 1 {
                    t.Fatalf("wrote(%d) != 1", wrote)
                }
            }

            if err = mgr.Sync(); err != nil {
                t.Fatal(err)
            }

            gotBytes, err := ioutil.ReadFile(path.Join(rootPath, filePath))
            if err != nil {
                t.Fatal(err)
            }
            if string(gotBytes) != content {
                t.Errorf("content not same expected(%s) got(%s)", content, string(gotBytes))
            }
        }
    })

    t.Run("concurrent upload", func(t *testing.T) {
        mgr, _ := newTestUploadMgr()
        defer mgr.Close()

        testCases := []map[string]string{
            map[string]string{
                "file20.md":       "111",
                "file21.md":       "2222000",
                "path1/file22.md": "1010011",
                "path2/file22.md": "1010011",
            },
        }

        uploadWorker := func(id int, filePath, content string, wg *sync.WaitGroup) {
            err = mgr.Create(filePath, int64(len([]byte(content))))
            if err != nil {
                t.Fatal(err)
            }

            bytes := []byte(content)
            for i := 0; i < len(bytes); i++ {
                wrote, err := mgr.WriteChunk(filePath, bytes[i:i+1], int64(i))
                if err != nil {
                    t.Fatal(err)
                }
                if wrote != 1 {
                    t.Fatalf("wrote(%d) != 1", wrote)
                }
                if *debug {
                    fmt.Printf("worker-%d wrote %s\n", id, string(bytes[i:i+1]))
                }
            }

            wg.Done()
        }

        for _, files := range testCases {
            wg := &sync.WaitGroup{}
            workerID := 0
            for filePath, content := range files {
                wg.Add(1)
                go uploadWorker(workerID, filePath, content, wg)
                workerID++
            }

            wg.Wait()

            if err = mgr.Sync(); err != nil {
                t.Fatal(err)
            }

            for filePath, content := range files {
                gotBytes, err := ioutil.ReadFile(path.Join(rootPath, filePath))
                if err != nil {
                    t.Fatal(err)
                }
                if string(gotBytes) != content {
                    t.Errorf("content not same expected(%s) got(%s)", content, string(gotBytes))
                }
            }
        }
    })
}