This commit is contained in:
hexxa 2021-09-11 20:06:01 +08:00 committed by Hexxa
parent e47ee4aa8c
commit 8518072c7e
8 changed files with 134 additions and 30 deletions

View file

@ -31,6 +31,7 @@ export interface MetadataResp {
size: number;
modTime: string;
isDir: boolean;
sha1: string;
}
export interface UploadStatusResp {

View file

@ -453,6 +453,8 @@ export class Browser extends React.Component<Props, State, {}> {
</span>
&nbsp;/&nbsp;
<span>{FileSize(item.size, { round: 0 })}</span>
&nbsp;/&nbsp;
<span>{item.sha1}</span>
</div>
</span>,
])}

View file

@ -50,9 +50,9 @@ export class PaneSettings extends React.Component<Props, State, {}> {
.setPwd(this.state.oldPwd, this.state.newPwd1)
.then((ok: boolean) => {
if (ok) {
alertMsg(this.props.msg.pkg.get("settings.pwd.updated"));
alertMsg(this.props.msg.pkg.get("update.ok"));
} else {
alertMsg(this.props.msg.pkg.get("settings.pwd.fail"));
alertMsg(this.props.msg.pkg.get("update.fail"));
}
this.setState({
oldPwd: "",

View file

@ -39,6 +39,7 @@ type IFileInfoStore interface {
SetInfo(itemPath string, info *FileInfo) error
DelInfo(itemPath string) error
SetSha1(itemPath, sign string) error
GetInfos(itemPaths []string) (map[string]*FileInfo, error)
}
type FileInfoStore struct {
@ -145,6 +146,22 @@ func (fi *FileInfoStore) GetInfo(itemPath string) (*FileInfo, error) {
return info, nil
}
// GetInfos looks up the stored FileInfo for each path in itemPaths.
// Paths with no stored record are silently skipped, so the returned map
// may contain fewer entries than len(itemPaths). Any error other than
// not-found aborts the whole lookup.
func (fi *FileInfoStore) GetInfos(itemPaths []string) (map[string]*FileInfo, error) {
	// pre-size for the common case where every path has a record
	infos := make(map[string]*FileInfo, len(itemPaths))
	for _, itemPath := range itemPaths {
		info, err := fi.GetInfo(itemPath)
		if err != nil {
			if !IsNotFound(err) {
				return nil, err
			}
			// no info recorded for this path yet; leave it out of the map
			continue
		}
		infos[itemPath] = info
	}
	return infos, nil
}
func (fi *FileInfoStore) SetInfo(itemPath string, info *FileInfo) error {
infoStr, err := json.Marshal(info)
if err != nil {

View file

@ -232,6 +232,7 @@ type MetadataResp struct {
Size int64 `json:"size"`
ModTime time.Time `json:"modTime"`
IsDir bool `json:"isDir"`
// BUG FIX: the tag was `json"sha1"` (missing colon). encoding/json
// ignores malformed tags and would serialize the field as "Sha1",
// while the web client reads metadata.sha1.
Sha1 string `json:"sha1"`
}
func (h *FileHandlers) Metadata(c *gin.Context) {
@ -610,6 +611,37 @@ type ListResp struct {
Metadatas []*MetadataResp `json:"metadatas"`
}
// MergeFileInfos converts each os.FileInfo under dirPath into a
// MetadataResp and, for regular files, fills in the Sha1 recorded in
// the file-info store. Directories never carry a sha1; files with no
// stored record keep an empty Sha1.
func (h *FileHandlers) MergeFileInfos(dirPath string, infos []os.FileInfo) ([]*MetadataResp, error) {
	// itemPaths[i] is the full path of metadatas[i], or "" for a
	// directory, so filepath.Join runs once per entry instead of twice.
	itemPaths := make([]string, 0, len(infos))
	filePaths := make([]string, 0, len(infos))
	metadatas := make([]*MetadataResp, 0, len(infos))
	for _, info := range infos {
		itemPath := ""
		if !info.IsDir() {
			itemPath = filepath.Join(dirPath, info.Name())
			filePaths = append(filePaths, itemPath)
		}
		itemPaths = append(itemPaths, itemPath)
		metadatas = append(metadatas, &MetadataResp{
			Name:    info.Name(),
			Size:    info.Size(),
			ModTime: info.ModTime(),
			IsDir:   info.IsDir(),
		})
	}

	dbInfos, err := h.deps.FileInfos().GetInfos(filePaths)
	if err != nil {
		return nil, err
	}
	for i, metadata := range metadatas {
		if itemPaths[i] == "" {
			continue // directory: nothing to merge
		}
		if dbInfo, ok := dbInfos[itemPaths[i]]; ok {
			metadata.Sha1 = dbInfo.Sha1
		}
	}
	return metadatas, nil
}
func (h *FileHandlers) List(c *gin.Context) {
dirPath := c.Query(ListDirQuery)
if dirPath == "" {
@ -628,14 +660,11 @@ func (h *FileHandlers) List(c *gin.Context) {
c.JSON(q.ErrResp(c, 500, err))
return
}
metadatas := []*MetadataResp{}
for _, info := range infos {
metadatas = append(metadatas, &MetadataResp{
Name: info.Name(),
Size: info.Size(),
ModTime: info.ModTime(),
IsDir: info.IsDir(),
})
metadatas, err := h.MergeFileInfos(dirPath, infos)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
c.JSON(200, &ListResp{
@ -647,19 +676,17 @@ func (h *FileHandlers) List(c *gin.Context) {
func (h *FileHandlers) ListHome(c *gin.Context) {
userName := c.MustGet(q.UserParam).(string)
fsPath := q.FsRootPath(userName, "/")
infos, err := h.deps.FS().ListDir(fsPath)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
metadatas := []*MetadataResp{}
for _, info := range infos {
metadatas = append(metadatas, &MetadataResp{
Name: info.Name(),
Size: info.Size(),
ModTime: info.ModTime(),
IsDir: info.IsDir(),
})
metadatas, err := h.MergeFileInfos(fsPath, infos)
if err != nil {
c.JSON(q.ErrResp(c, 500, err))
return
}
c.JSON(200, &ListResp{
@ -832,6 +859,38 @@ func (h *FileHandlers) ListSharings(c *gin.Context) {
c.JSON(200, &SharingResp{SharingDirs: dirs})
}
// HashBody is the JSON request body for the sha1-generation endpoint:
// the path of the file whose hash should be computed.
type HashBody struct {
FilePath string `json:"filePath"`
}
// GenerateHash enqueues a background job that computes the sha1 of the
// file at req.FilePath. It responds 200 as soon as the job is queued;
// the hash itself is produced asynchronously by the worker pool.
func (h *FileHandlers) GenerateHash(c *gin.Context) {
	req := &HashBody{}
	// req is already a pointer; the previous ShouldBindJSON(&req) passed a
	// **HashBody, which json handles but is needlessly indirect.
	if err := c.ShouldBindJSON(req); err != nil {
		c.JSON(q.ErrResp(c, 400, err))
		return
	}

	role := c.MustGet(q.RoleParam).(string)
	userName := c.MustGet(q.UserParam).(string)
	if !h.canAccess(userName, role, "hash.gen", req.FilePath) {
		c.JSON(q.ErrResp(c, 403, q.ErrAccessDenied))
		return
	}

	// TryPut presumably rejects the job when the queue is full rather
	// than blocking the request — TODO confirm against the worker pool.
	err := h.workers.TryPut(
		localworker.NewMsg(
			h.deps.ID().Gen(),
			map[string]string{localworker.MsgTypeKey: "sha1"},
			req.FilePath,
		),
	)
	if err != nil {
		c.JSON(q.ErrResp(c, 500, err))
		return
	}
	c.JSON(q.Resp(200))
}
func (h *FileHandlers) GetStreamReader(userID uint64, fd io.Reader) (io.Reader, error) {
pr, pw := io.Pipe()
chunkSize := 100 * 1024 // notice: it can not be greater than limiter's token count

View file

@ -69,6 +69,8 @@ func NewMultiUsersSvc(cfg gocfg.ICfg, deps *depidx.Deps) (*MultiUsersSvc, error)
apiRuleCname(userstore.AdminRole, "DELETE", "/v1/fs/sharings"): true,
apiRuleCname(userstore.AdminRole, "GET", "/v1/fs/sharings"): true,
apiRuleCname(userstore.AdminRole, "GET", "/v1/fs/sharings/exist"): true,
apiRuleCname(userstore.AdminRole, "GET", "/hashes/sha1"): true,
// user rules
apiRuleCname(userstore.UserRole, "GET", "/"): true,
apiRuleCname(userstore.UserRole, "GET", publicPath): true,
@ -97,6 +99,7 @@ func NewMultiUsersSvc(cfg gocfg.ICfg, deps *depidx.Deps) (*MultiUsersSvc, error)
apiRuleCname(userstore.UserRole, "DELETE", "/v1/fs/sharings"): true,
apiRuleCname(userstore.UserRole, "GET", "/v1/fs/sharings"): true,
apiRuleCname(userstore.UserRole, "GET", "/v1/fs/sharings/exist"): true,
apiRuleCname(userstore.AdminRole, "GET", "/hashes/sha1"): true,
// visitor rules
apiRuleCname(userstore.VisitorRole, "GET", "/"): true,
apiRuleCname(userstore.VisitorRole, "GET", publicPath): true,

View file

@ -8,7 +8,9 @@ import (
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
"syscall"
"time"
"github.com/gin-contrib/static"
@ -37,10 +39,11 @@ import (
)
type Server struct {
server *http.Server
cfg gocfg.ICfg
deps *depidx.Deps
workers worker.IWorkerPool
server *http.Server
cfg gocfg.ICfg
deps *depidx.Deps
workers worker.IWorkerPool
signalChan chan os.Signal
}
func NewServer(cfg gocfg.ICfg) (*Server, error) {
@ -49,8 +52,9 @@ func NewServer(cfg gocfg.ICfg) (*Server, error) {
}
deps := initDeps(cfg)
workers := localworker.NewWorkerPool(1024, 5000, 2, deps)
router := gin.Default()
router, err := initHandlers(router, cfg, deps)
router, err := initHandlers(router, cfg, deps, workers)
if err != nil {
return nil, err
}
@ -64,9 +68,10 @@ func NewServer(cfg gocfg.ICfg) (*Server, error) {
}
return &Server{
server: srv,
deps: deps,
cfg: cfg,
server: srv,
deps: deps,
cfg: cfg,
workers: workers,
}, nil
}
@ -130,10 +135,7 @@ func initDeps(cfg gocfg.ICfg) *depidx.Deps {
return deps
}
func initHandlers(router *gin.Engine, cfg gocfg.ICfg, deps *depidx.Deps) (*gin.Engine, error) {
// workers
workers := localworker.NewWorkerPool(1024, 5000, 2, deps)
func initHandlers(router *gin.Engine, cfg gocfg.ICfg, deps *depidx.Deps, workers worker.IWorkerPool) (*gin.Engine, error) {
// handlers
userHdrs, err := multiusers.NewMultiUsersSvc(cfg, deps)
if err != nil {
@ -237,6 +239,8 @@ func initHandlers(router *gin.Engine, cfg gocfg.ICfg, deps *depidx.Deps) (*gin.E
filesAPI.GET("/metadata", fileHdrs.Metadata)
filesAPI.POST("/hashes/sha1", fileHdrs.GenerateHash)
settingsAPI := v1.Group("/settings")
settingsAPI.OPTIONS("/health", settingsSvc.Health)
@ -263,6 +267,18 @@ func initLogger(cfg gocfg.ICfg) *zap.SugaredLogger {
}
func (s *Server) Start() error {
s.signalChan = make(chan os.Signal, 4)
signal.Notify(s.signalChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
sig := <-s.signalChan
if sig != nil {
s.deps.Log().Infow(
fmt.Sprintf("received signal %s: shutting down\n", sig.String()),
)
}
s.Shutdown()
}()
s.deps.Log().Infow(
"quickshare is starting",
"hostname:port",
@ -282,6 +298,7 @@ func (s *Server) Start() error {
// Shutdown stops the worker pool, flushes buffered log entries, and
// gracefully shuts the HTTP server down, waiting at most 10 seconds
// for in-flight requests to finish.
func (s *Server) Shutdown() error {
	s.workers.Stop()
	// Sync can fail on some sinks (e.g. stderr); flushing is best-effort.
	s.deps.Log().Sync()
	// Bound the graceful shutdown so a stuck connection cannot hang the
	// process forever (resolves the previous "add timeout" TODO).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return s.server.Shutdown(ctx)
}

View file

@ -88,7 +88,12 @@ func (wp *WorkerPool) Start() {
}
// Stop signals the workers to exit, blocks until all of them have
// stopped, and finally closes the job queue.
func (wp *WorkerPool) Stop() {
	defer close(wp.queue)
	// NOTE(review): wp.on and wp.started look like they are read by the
	// worker goroutines; unless they are accessed atomically elsewhere
	// this is a data race — confirm and guard with sync/atomic if needed.
	wp.on = false
	for wp.started > 0 {
		// Errorf formats its arguments itself; the previous
		// Errorf(fmt.Sprintf(...)) formatted the message twice.
		wp.deps.Log().Errorf("%d workers still in working", wp.started)
		time.Sleep(time.Second)
	}
}
func (wp *WorkerPool) startWorker() {