author     Tiago Peczenyj <tpeczenyj@weborama.com>  2023-11-05 14:46:42 +0100
committer  GitHub <noreply@github.com>              2023-11-05 14:46:42 +0100
commit     4010b16eef6632331ba564dc76174a3db7ff4903 (patch)
tree       f6421eb3d5ac5f93ade2d9f692b9249bf3847da3 /fs.go
parent     Allow redirect URI path to not be normalized. (#1638) (diff)
Add support for fs.FS when serving static files (#1640)

* substitute *os.File by fs.File
* refactor error handling by using the new recommended form
* finish implementation
* substitute seek(offset, 0) by seek(offset, io.SeekStart)
* add unit test
* use io.SeekStart on Seek method
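A minimal usage sketch of the new helper (not taken from this commit; the embedded directory name, port, and variable names are illustrative): serving files embedded with embed.FS through ServeFS.

package main

import (
	"embed"
	"io/fs"
	"log"

	"github.com/valyala/fasthttp"
)

//go:embed static
var embedded embed.FS

func main() {
	// Strip the "static" prefix so that a request for /app.js
	// resolves to static/app.js inside the embedded filesystem.
	sub, err := fs.Sub(embedded, "static")
	if err != nil {
		log.Fatal(err)
	}

	handler := func(ctx *fasthttp.RequestCtx) {
		// ServeFS builds a one-off FS request handler and serves the
		// requested path from the given fs.FS.
		fasthttp.ServeFS(ctx, sub, string(ctx.Path()))
	}

	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}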
Diffstat (limited to 'fs.go')
-rw-r--r--  fs.go  238
1 file changed, 194 insertions, 44 deletions
diff --git a/fs.go b/fs.go
index fc679de..4d8c1c1 100644
--- a/fs.go
+++ b/fs.go
@@ -6,6 +6,7 @@ import (
"fmt"
"html"
"io"
+ "io/fs"
"mime"
"net/http"
"os"
@@ -136,6 +137,32 @@ var (
rootFSHandler RequestHandler
)
+// ServeFS returns HTTP response containing compressed file contents from the given fs.FS's path.
+//
+// HTTP response may contain uncompressed file contents in the following cases:
+//
+// - Missing 'Accept-Encoding: gzip' request header.
+// - No write access to directory containing the file.
+//
+// Directory contents is returned if path points to directory.
+//
+// See also ServeFile.
+func ServeFS(ctx *RequestCtx, filesystem fs.FS, path string) {
+ f := &FS{
+ FS: filesystem,
+ Root: "",
+ AllowEmptyRoot: true,
+ GenerateIndexPages: true,
+ Compress: true,
+ CompressBrotli: true,
+ AcceptByteRange: true,
+ }
+ handler := f.NewRequestHandler()
+
+ ctx.Request.SetRequestURI(path)
+ handler(ctx)
+}
+
// PathRewriteFunc must return new request path based on arbitrary ctx
// info such as ctx.Path().
//
@@ -225,6 +252,9 @@ func NewPathPrefixStripper(prefixSize int) PathRewriteFunc {
type FS struct {
noCopy noCopy
+ // FS is the filesystem to serve files from, e.g. embed.FS or os.DirFS.
+ FS fs.FS
+
// Path to the root directory to serve files from.
Root string
@@ -391,16 +421,20 @@ func (fs *FS) NewRequestHandler() RequestHandler {
}
func (fs *FS) normalizeRoot(root string) string {
- // Serve files from the current working directory if Root is empty or if Root is a relative path.
- if (!fs.AllowEmptyRoot && len(root) == 0) || (len(root) > 0 && !filepath.IsAbs(root)) {
- path, err := os.Getwd()
- if err != nil {
- path = "."
+ // fs.FS uses relative paths, which are slash-separated on all systems, even Windows.
+ if fs.FS == nil {
+ // Serve files from the current working directory if Root is empty or if Root is a relative path.
+ if (!fs.AllowEmptyRoot && len(root) == 0) || (len(root) > 0 && !filepath.IsAbs(root)) {
+ path, err := os.Getwd()
+ if err != nil {
+ path = "."
+ }
+ root = path + "/" + root
}
- root = path + "/" + root
+
+ // convert the root directory slashes to the native format
+ root = filepath.FromSlash(root)
}
- // convert the root directory slashes to the native format
- root = filepath.FromSlash(root)
// strip trailing slashes from the root path
for len(root) > 0 && root[len(root)-1] == os.PathSeparator {
@@ -440,6 +474,7 @@ func (fs *FS) initRequestHandler() {
}
h := &fsHandler{
+ fs: fs.FS,
root: root,
indexNames: fs.IndexNames,
pathRewrite: fs.PathRewrite,
@@ -456,6 +491,10 @@ func (fs *FS) initRequestHandler() {
cacheGzip: make(map[string]*fsFile),
}
+ if h.fs == nil {
+ h.fs = &osFS{} // It provides os.Open and os.Stat
+ }
+
go func() {
var pendingFiles []*fsFile
@@ -488,6 +527,7 @@ func (fs *FS) initRequestHandler() {
}
type fsHandler struct {
+ fs fs.FS
root string
indexNames []string
pathRewrite PathRewriteFunc
@@ -510,7 +550,8 @@ type fsHandler struct {
type fsFile struct {
h *fsHandler
- f *os.File
+ f fs.File
+ filename string // fs.FileInfo.Name() returns the filename, not the filepath.
dirIndex []byte
contentType string
contentLength int
@@ -555,6 +596,9 @@ func (ff *fsFile) smallFileReader() (io.Reader, error) {
const maxSmallFileSize = 2 * 4096
func (ff *fsFile) isBig() bool {
+ if _, ok := ff.h.fs.(*osFS); !ok { // fs.FS only uses bigFileReader, memory cache uses fsSmallFileReader
+ return ff.f != nil
+ }
return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0
}
@@ -577,7 +621,7 @@ func (ff *fsFile) bigFileReader() (io.Reader, error) {
return r, nil
}
- f, err := os.Open(ff.f.Name())
+ f, err := ff.h.fs.Open(ff.filename)
if err != nil {
return nil, fmt.Errorf("cannot open already opened file: %w", err)
}
@@ -614,14 +658,18 @@ func (ff *fsFile) decReadersCount() {
// bigFileReader attempts to trigger sendfile
// for sending big files over the wire.
type bigFileReader struct {
- f *os.File
+ f fs.File
ff *fsFile
r io.Reader
lr io.LimitedReader
}
func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error {
- if _, err := r.f.Seek(int64(startPos), 0); err != nil {
+ seeker, ok := r.f.(io.Seeker)
+ if !ok {
+ return errors.New("must implement io.Seeker")
+ }
+ if _, err := seeker.Seek(int64(startPos), io.SeekStart); err != nil {
return err
}
r.r = &r.lr
@@ -646,7 +694,12 @@ func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) {
func (r *bigFileReader) Close() error {
r.r = r.f
- n, err := r.f.Seek(0, 0)
+ seeker, ok := r.f.(io.Seeker)
+ if !ok {
+ _ = r.f.Close()
+ return errors.New("must implement io.Seeker")
+ }
+ n, err := seeker.Seek(0, io.SeekStart)
if err == nil {
if n == 0 {
ff := r.ff
@@ -655,7 +708,7 @@ func (r *bigFileReader) Close() error {
ff.bigFilesLock.Unlock()
} else {
_ = r.f.Close()
- err = errors.New("bug: File.Seek(0,0) returned (non-zero, nil)")
+ err = errors.New("bug: File.Seek(0, io.SeekStart) returned (non-zero, nil)")
}
} else {
_ = r.f.Close()
@@ -697,7 +750,11 @@ func (r *fsSmallFileReader) Read(p []byte) (int, error) {
ff := r.ff
if ff.f != nil {
- n, err := ff.f.ReadAt(p, int64(r.startPos))
+ ra, ok := ff.f.(io.ReaderAt)
+ if !ok {
+ return 0, errors.New("must implement io.ReaderAt")
+ }
+ n, err := ra.ReadAt(p, int64(r.startPos))
r.startPos += n
return n, err
}
@@ -732,7 +789,11 @@ func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) {
if len(buf) > tailLen {
buf = buf[:tailLen]
}
- n, err = ff.f.ReadAt(buf, int64(curPos))
+ ra, ok := ff.f.(io.ReaderAt)
+ if !ok {
+ return 0, errors.New("must implement io.ReaderAt")
+ }
+ n, err = ra.ReadAt(buf, int64(curPos))
nw, errw := w.Write(buf[:n])
curPos += nw
if errw == nil && nw != n {
@@ -799,6 +860,12 @@ func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*
}
func (h *fsHandler) pathToFilePath(path string) string {
+ if _, ok := h.fs.(*osFS); !ok {
+ if len(path) < 1 {
+ return path
+ }
+ return path[1:]
+ }
return filepath.FromSlash(h.root + path)
}
@@ -1051,7 +1118,7 @@ func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress
if err == nil {
return ff, nil
}
- if !os.IsNotExist(err) {
+ if !errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("cannot open file %q: %w", indexFilePath, err)
}
}
@@ -1060,7 +1127,7 @@ func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress
return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
}
- return h.createDirIndex(ctx.URI(), dirPath, mustCompress, fileEncoding)
+ return h.createDirIndex(ctx, dirPath, mustCompress, fileEncoding)
}
var (
@@ -1068,9 +1135,11 @@ var (
errNoCreatePermission = errors.New("no 'create file' permissions")
)
-func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
+func (h *fsHandler) createDirIndex(ctx *RequestCtx, dirPath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
w := &bytebufferpool.ByteBuffer{}
+ base := ctx.URI()
+
basePathEscaped := html.EscapeString(string(base.Path()))
_, _ = fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
_, _ = fmt.Fprintf(w, "<h1>%s</h1>", basePathEscaped)
@@ -1084,28 +1153,29 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool,
_, _ = fmt.Fprintf(w, `<li><a href="%s" class="dir">..</a></li>`, parentPathEscaped)
}
- f, err := os.Open(dirPath)
+ dirEntries, err := fs.ReadDir(h.fs, dirPath)
if err != nil {
return nil, err
}
- fileinfos, err := f.Readdir(0)
- _ = f.Close()
- if err != nil {
- return nil, err
- }
-
- fm := make(map[string]os.FileInfo, len(fileinfos))
- filenames := make([]string, 0, len(fileinfos))
+ fm := make(map[string]fs.FileInfo, len(dirEntries))
+ filenames := make([]string, 0, len(dirEntries))
nestedContinue:
- for _, fi := range fileinfos {
- name := fi.Name()
+ for _, de := range dirEntries {
+ name := de.Name()
for _, cfs := range h.compressedFileSuffixes {
if strings.HasSuffix(name, cfs) {
// Do not show compressed files on index page.
continue nestedContinue
}
}
+ fi, err := de.Info()
+ if err != nil {
+ ctx.Logger().Printf("cannot fetch information from dir entry %q: %v, skip", name, err)
+
+ continue nestedContinue
+ }
+
fm[name] = fi
filenames = append(filenames, name)
}
@@ -1163,7 +1233,7 @@ const (
)
func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string) (*fsFile, error) {
- f, err := os.Open(filePath)
+ f, err := h.fs.Open(filePath)
if err != nil {
return nil, err
}
@@ -1182,10 +1252,15 @@ func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string)
if strings.HasSuffix(filePath, h.compressedFileSuffixes[fileEncoding]) ||
fileInfo.Size() > fsMaxCompressibleFileSize ||
!isFileCompressible(f, fsMinCompressRatio) {
- return h.newFSFile(f, fileInfo, false, "")
+ return h.newFSFile(f, fileInfo, false, filePath, "")
}
compressedFilePath := h.filePathToCompressed(filePath)
+
+ if _, ok := h.fs.(*osFS); !ok {
+ return h.newCompressedFSFileCache(f, fileInfo, compressedFilePath, fileEncoding)
+ }
+
if compressedFilePath != filePath {
if err := os.MkdirAll(filepath.Dir(compressedFilePath), os.ModePerm); err != nil {
return nil, err
@@ -1207,7 +1282,7 @@ func (h *fsHandler) compressAndOpenFSFile(filePath string, fileEncoding string)
return ff, err
}
-func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string, fileEncoding string) (*fsFile, error) {
+func (h *fsHandler) compressFileNolock(f fs.File, fileInfo fs.FileInfo, filePath, compressedFilePath string, fileEncoding string) (*fsFile, error) {
// Attempt to open compressed file created by another concurrent
// goroutine.
// It is safe opening such a file, since the file creation
@@ -1223,7 +1298,7 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat
zf, err := os.Create(tmpFilePath)
if err != nil {
_ = f.Close()
- if !os.IsPermission(err) {
+ if !errors.Is(err, fs.ErrPermission) {
return nil, fmt.Errorf("cannot create temporary file %q: %w", tmpFilePath, err)
}
return nil, errNoCreatePermission
@@ -1258,8 +1333,71 @@ func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePat
return h.newCompressedFSFile(compressedFilePath, fileEncoding)
}
+// newCompressedFSFileCache compresses the file and keeps the result in an in-memory cache.
+func (h *fsHandler) newCompressedFSFileCache(f fs.File, fileInfo fs.FileInfo, filePath, fileEncoding string) (*fsFile, error) {
+ var (
+ w = &bytebufferpool.ByteBuffer{}
+ err error
+ )
+
+ if fileEncoding == "br" {
+ zw := acquireStacklessBrotliWriter(w, CompressDefaultCompression)
+ _, err = copyZeroAlloc(zw, f)
+ if err1 := zw.Flush(); err == nil {
+ err = err1
+ }
+ releaseStacklessBrotliWriter(zw, CompressDefaultCompression)
+ } else if fileEncoding == "gzip" {
+ zw := acquireStacklessGzipWriter(w, CompressDefaultCompression)
+ _, err = copyZeroAlloc(zw, f)
+ if err1 := zw.Flush(); err == nil {
+ err = err1
+ }
+ releaseStacklessGzipWriter(zw, CompressDefaultCompression)
+ }
+ defer func() { _ = f.Close() }()
+
+ if err != nil {
+ return nil, fmt.Errorf("error when compressing file %q: %w", filePath, err)
+ }
+
+ seeker, ok := f.(io.Seeker)
+ if !ok {
+ return nil, errors.New("not implemented io.Seeker")
+ }
+ if _, err = seeker.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+
+ ext := fileExtension(fileInfo.Name(), false, h.compressedFileSuffixes[fileEncoding])
+ contentType := mime.TypeByExtension(ext)
+ if len(contentType) == 0 {
+ data, err := readFileHeader(f, false, fileEncoding)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read header of the file %q: %w", fileInfo.Name(), err)
+ }
+ contentType = http.DetectContentType(data)
+ }
+
+ dirIndex := w.B
+ lastModified := fileInfo.ModTime()
+ ff := &fsFile{
+ h: h,
+ dirIndex: dirIndex,
+ contentType: contentType,
+ contentLength: len(dirIndex),
+ compressed: true,
+ lastModified: lastModified,
+ lastModifiedStr: AppendHTTPDate(nil, lastModified),
+
+ t: time.Now(),
+ }
+
+ return ff, nil
+}
+
func (h *fsHandler) newCompressedFSFile(filePath string, fileEncoding string) (*fsFile, error) {
- f, err := os.Open(filePath)
+ f, err := h.fs.Open(filePath)
if err != nil {
return nil, fmt.Errorf("cannot open compressed file %q: %w", filePath, err)
}
@@ -1268,7 +1406,7 @@ func (h *fsHandler) newCompressedFSFile(filePath string, fileEncoding string) (*
_ = f.Close()
return nil, fmt.Errorf("cannot obtain info for compressed file %q: %w", filePath, err)
}
- return h.newFSFile(f, fileInfo, true, fileEncoding)
+ return h.newFSFile(f, fileInfo, true, filePath, fileEncoding)
}
func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding string) (*fsFile, error) {
@@ -1277,9 +1415,9 @@ func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding
filePath += h.compressedFileSuffixes[fileEncoding]
}
- f, err := os.Open(filePath)
+ f, err := h.fs.Open(filePath)
if err != nil {
- if mustCompress && os.IsNotExist(err) {
+ if mustCompress && errors.Is(err, fs.ErrNotExist) {
return h.compressAndOpenFSFile(filePathOriginal, fileEncoding)
}
return nil, err
@@ -1301,7 +1439,7 @@ func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding
}
if mustCompress {
- fileInfoOriginal, err := os.Stat(filePathOriginal)
+ fileInfoOriginal, err := fs.Stat(h.fs, filePathOriginal)
if err != nil {
_ = f.Close()
return nil, fmt.Errorf("cannot obtain info for original file %q: %w", filePathOriginal, err)
@@ -1318,10 +1456,10 @@ func (h *fsHandler) openFSFile(filePath string, mustCompress bool, fileEncoding
}
}
- return h.newFSFile(f, fileInfo, mustCompress, fileEncoding)
+ return h.newFSFile(f, fileInfo, mustCompress, filePath, fileEncoding)
}
-func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool, fileEncoding string) (*fsFile, error) {
+func (h *fsHandler) newFSFile(f fs.File, fileInfo fs.FileInfo, compressed bool, filePath, fileEncoding string) (*fsFile, error) {
n := fileInfo.Size()
contentLength := int(n)
if n != int64(contentLength) {
@@ -1335,7 +1473,7 @@ func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool,
if len(contentType) == 0 {
data, err := readFileHeader(f, compressed, fileEncoding)
if err != nil {
- return nil, fmt.Errorf("cannot read header of the file %q: %w", f.Name(), err)
+ return nil, fmt.Errorf("cannot read header of the file %q: %w", fileInfo.Name(), err)
}
contentType = http.DetectContentType(data)
}
@@ -1344,6 +1482,7 @@ func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool,
ff := &fsFile{
h: h,
f: f,
+ filename: filePath,
contentType: contentType,
contentLength: contentLength,
compressed: compressed,
@@ -1355,7 +1494,7 @@ func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool,
return ff, nil
}
-func readFileHeader(f *os.File, compressed bool, fileEncoding string) ([]byte, error) {
+func readFileHeader(f io.Reader, compressed bool, fileEncoding string) ([]byte, error) {
r := io.Reader(f)
var (
br *brotli.Reader
@@ -1381,7 +1520,11 @@ func readFileHeader(f *os.File, compressed bool, fileEncoding string) ([]byte, e
N: 512,
}
data, err := io.ReadAll(lr)
- if _, err := f.Seek(0, 0); err != nil {
+ seeker, ok := f.(io.Seeker)
+ if !ok {
+ return nil, errors.New("must implement io.Seeker")
+ }
+ if _, err := seeker.Seek(0, io.SeekStart); err != nil {
return nil, err
}
@@ -1456,3 +1599,10 @@ func getFileLock(absPath string) *sync.Mutex {
filelock := v.(*sync.Mutex)
return filelock
}
+
+var _ fs.FS = (*osFS)(nil)
+
+type osFS struct{}
+
+func (o *osFS) Open(name string) (fs.File, error) { return os.Open(name) }
+func (o *osFS) Stat(name string) (fs.FileInfo, error) { return os.Stat(name) }
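
For completeness, a sketch (again not from this commit; the embedded directory name and settings are illustrative) of wiring the new FS field into a reusable handler instead of calling ServeFS per request. When FS is left nil, initRequestHandler falls back to the osFS wrapper above and behaviour is unchanged.

package main

import (
	"embed"
	"log"

	"github.com/valyala/fasthttp"
)

//go:embed static
var embedded embed.FS

func main() {
	myFS := &fasthttp.FS{
		// Serve from the embedded filesystem; a request for /static/app.js
		// maps to the embedded file static/app.js (only the leading slash is stripped).
		FS:                 embedded,
		IndexNames:         []string{"index.html"},
		GenerateIndexPages: true,
		// For fs.FS backends, compressed variants are built once and cached
		// in memory instead of being written next to the original files.
		Compress: true,
	}

	handler := myFS.NewRequestHandler()
	log.Fatal(fasthttp.ListenAndServe(":8080", handler))
}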