diff --git a/Makefile b/Makefile
index 44134ee..3e399fd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: build run test bench lint precompress clean release install commit bump changelog benchmark benchmark-keep benchmark-down benchmark-baremetal
+.PHONY: build run test bench lint precompress clean release install commit bump changelog benchmark benchmark-keep benchmark-down benchmark-baremetal benchmark-compress benchmark-compress-keep benchmark-compress-down
# Binary output path and name
BIN := bin/static-web
@@ -41,11 +41,11 @@ bench:
lint:
go vet ./...
-## precompress: gzip and brotli compress all files in ./public
+## precompress: gzip, brotli, and zstd compress all files in ./public
precompress:
@echo "Pre-compressing files in ./public ..."
@find ./public -type f \
- ! -name "*.gz" ! -name "*.br" \
+ ! -name "*.gz" ! -name "*.br" ! -name "*.zst" \
| while read f; do \
if command -v gzip >/dev/null 2>&1; then \
gzip -k -f "$$f" && echo " gzip: $$f.gz"; \
@@ -53,6 +53,9 @@ precompress:
if command -v brotli >/dev/null 2>&1; then \
brotli -f "$$f" -o "$$f.br" && echo " brotli: $$f.br"; \
fi; \
+ if command -v zstd >/dev/null 2>&1; then \
+ zstd -k -f "$$f" && echo " zstd: $$f.zst"; \
+ fi; \
done
@echo "Done."
@@ -87,3 +90,15 @@ benchmark-down:
## benchmark-baremetal: run bare-metal benchmark (static-web production vs Bun, no Docker)
benchmark-baremetal:
@bash benchmark/baremetal.sh
+
+## benchmark-compress: run compression-specific benchmark suite (tears down when done)
+benchmark-compress:
+ @bash benchmark/compress-bench.sh
+
+## benchmark-compress-keep: same as benchmark-compress but leaves containers running afterwards
+benchmark-compress-keep:
+ @bash benchmark/compress-bench.sh -k
+
+## benchmark-compress-down: tear down any running compression benchmark containers
+benchmark-compress-down:
+ docker compose -f benchmark/docker-compose.compression.yml down --remove-orphans
diff --git a/README.md b/README.md
index 9f63fca..1a52a72 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ static-web --help
| Feature | Detail |
|---------|--------|
| **In-memory LRU cache** | Size-bounded, byte-accurate; ~28 ns/op lookup with 0 allocations. Optional startup preload for instant cache hits. |
-| **gzip compression** | On-the-fly via pooled `gzip.Writer`; pre-compressed `.gz`/`.br` sidecar support |
+| **Compression** | On-the-fly gzip; pre-compressed `.gz`/`.br`/`.zst` sidecar support; priority: br > zstd > gzip |
| **HTTP/2** | Automatic ALPN negotiation when TLS is configured |
| **Conditional requests** | ETag, `304 Not Modified`, `If-Modified-Since`, `If-None-Match` |
| **Range requests** | Byte ranges via custom `parseRange`/`serveRange` implementation for video and large files |
@@ -103,7 +103,7 @@ HTTP request
│ • Range/conditional → custom serveRange() │
│ • Cache miss → os.Stat → disk read → cache put │
│ • Large files (> max_file_size) bypass cache │
-│ • Encoding negotiation: brotli > gzip > plain │
+│ • Encoding negotiation: brotli > zstd > gzip > plain │
│ • Preloaded files served instantly on startup │
│ • Custom 404 page (path-validated) │
└─────────────────────────────────────────────────┘
@@ -262,7 +262,7 @@ Copy `config.toml.example` to `config.toml` and edit as needed. The server start
| `enabled` | bool | `true` | Enable compression |
| `min_size` | int | `1024` | Minimum bytes to compress |
| `level` | int | `5` | gzip level (1–9) |
-| `precompressed` | bool | `true` | Serve `.gz`/`.br` sidecar files |
+| `precompressed` | bool | `true` | Serve `.gz`/`.br`/`.zst` sidecar files |
### `[headers]`
@@ -345,24 +345,27 @@ When TLS is configured:
## Pre-compressed Files
-Place `.gz` and `.br` sidecar files alongside originals. The server serves them automatically when the client signals support:
+Place `.gz`, `.br`, and `.zst` sidecar files alongside originals. The server serves them automatically when the client signals support:
```
public/
app.js
app.js.gz ← served for Accept-Encoding: gzip
- app.js.br ← served for Accept-Encoding: br (preferred over gzip)
+ app.js.br ← served for Accept-Encoding: br (preferred)
+  app.js.zst    ← served for Accept-Encoding: zstd (fastest decompression)
style.css
style.css.gz
+ style.css.br
+ style.css.zst
```
Generate sidecars from the `Makefile`:
```bash
-make precompress # runs gzip and brotli on all .js/.css/.html/.json/.svg
+make precompress   # runs gzip, brotli, and zstd on every file in ./public (skips existing sidecars)
```
-> **Note**: On-the-fly brotli encoding is not implemented. Only `.br` sidecar files are served with brotli encoding.
+> **Note**: On-the-fly brotli encoding is not implemented. Only `.br` sidecar files are served with brotli encoding. Zstandard is available both as pre-compressed `.zst` sidecar files and as on-the-fly compression.
---
diff --git a/USER_GUIDE.md b/USER_GUIDE.md
index 73ee7c7..4d620b8 100644
--- a/USER_GUIDE.md
+++ b/USER_GUIDE.md
@@ -42,14 +42,14 @@ make build # produces bin/static-web
The server starts with sensible defaults even without a config file:
-| Default | Value |
-| ---------------------- | --------------------- |
-| Listen address | `:8080` |
-| Static files directory | `./public` |
-| In-memory cache | enabled, 256 MB |
-| Compression | enabled, gzip level 5 |
-| Dotfile protection | enabled |
-| Security headers | always set |
+| Default | Value |
+| ---------------------- | ------------------------------------- |
+| Listen address | `:8080` |
+| Static files directory | `./public` |
+| In-memory cache | enabled, 256 MB |
+| Compression            | enabled; gzip level 5; zstd on-the-fly, br via sidecars |
+| Dotfile protection | enabled |
+| Security headers | always set |
Point your browser at `http://localhost:8080`.
@@ -135,7 +135,7 @@ preload = false # true = load all files into RAM at startup
enabled = true
min_size = 1024 # don't compress responses smaller than 1 KB
level = 5 # gzip level 1 (fastest) – 9 (best)
-precompressed = true # serve .gz / .br sidecar files when available
+precompressed = true # serve .gz / .br / .zst sidecar files when available
[headers]
immutable_pattern = "" # glob for fingerprinted assets → Cache-Control: immutable
@@ -291,15 +291,18 @@ server {
## Pre-compressing Assets
-Serving pre-compressed files is far more efficient than on-the-fly gzip, especially for large JavaScript bundles. Place `.gz` and `.br` files alongside originals:
+Serving pre-compressed files is far more efficient than on-the-fly compression, especially for large JavaScript bundles. Place `.gz`, `.br`, and `.zst` files alongside originals:
```
public/
app.js
app.js.gz ← served when client sends Accept-Encoding: gzip
- app.js.br ← served when client sends Accept-Encoding: br (preferred over gzip)
+ app.js.br ← served when client sends Accept-Encoding: br (preferred)
+  app.js.zst    ← served when client sends Accept-Encoding: zstd (fastest decompression)
style.css
style.css.gz
+ style.css.br
+ style.css.zst
```
Generate them with the bundled Makefile target:
@@ -308,7 +311,7 @@ Generate them with the bundled Makefile target:
make precompress
```
-Or manually (requires `gzip` and `brotli` installed):
+Or manually (requires `gzip`, `brotli`, and `zstd` installed):
```bash
# gzip
@@ -316,6 +319,9 @@ gzip -k -9 public/app.js # keeps original, produces app.js.gz
# brotli
brotli -9 public/app.js -o public/app.js.br
+
+# zstandard
+zstd -k public/app.js # keeps original, produces app.js.zst
```
Enable in config (on by default):
@@ -327,6 +333,16 @@ precompressed = true
> **Note:** Brotli encoding is only available via pre-compressed `.br` sidecar files. On-the-fly brotli compression is not implemented.
+### Encoding Priority
+
+When a client sends multiple encodings in `Accept-Encoding`, the server selects in this order:
+
+1. **Brotli** (`.br`) — best compression ratio
+2. **Zstandard** (`.zst`) — fastest decompression, good compression
+3. **Gzip** (`.gz`) — universally supported fallback
+
+This ordering provides the best balance of compression ratio and decompression speed.
+
---
## Docker Deployment
@@ -744,6 +760,8 @@ Directory listing is **disabled by default** (`directory_listing = false`). Enab
| **Brotli on-the-fly not implemented** | Brotli encoding requires pre-compressed `.br` files. | Run `make precompress` as part of your build pipeline. |
| **No hot config reload** | SIGHUP flushes the cache only; config changes require a restart. | Use a process manager (systemd, Docker restart policy) for zero-downtime restarts. |
+> **Note:** Zstandard (`.zst`) compression is available both as pre-compressed sidecar files and as on-the-fly compression.
+
---
## Troubleshooting
@@ -779,7 +797,7 @@ If `cache.ttl` is `0`, entries remain cached until eviction pressure or SIGHUP f
1. Verify `compression.enabled = true` in config.
2. Check that the response is larger than `compression.min_size` (default: 1024 bytes).
-3. The client must send `Accept-Encoding: gzip`. Browsers do this automatically; `curl` does not by default — use `curl --compressed`.
+3. The client must send `Accept-Encoding: gzip`, `br`, or `zstd`. Browsers do this automatically; `curl` does not by default — use `curl --compressed` (advertises every encoding your curl build supports) or set the `Accept-Encoding` header explicitly.
4. Some content types are not compressed (images, video, audio, pre-compressed archives). This is intentional — re-compressing already-compressed data makes files larger.
### HTTPS redirect loop
diff --git a/config.toml.example b/config.toml.example
index 8f91030..320d561 100644
--- a/config.toml.example
+++ b/config.toml.example
@@ -65,7 +65,8 @@ min_size = 1024
# gzip compression level (1=fastest, 9=best). Default 5 is a good balance.
level = 5
-# Serve pre-compressed .gz and .br sidecar files when they exist alongside originals.
+# Serve pre-compressed .gz, .br, and .zst sidecar files when they exist alongside originals.
+# Encoding priority: br > zstd > gzip
precompressed = true
[headers]
diff --git a/docs/index.html b/docs/index.html
index 6cf485f..ec949ce 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -6,11 +6,11 @@
static-web — High-Performance Go Static File Server
@@ -20,7 +20,7 @@
@@ -37,7 +37,7 @@
@@ -75,7 +75,7 @@
"programmingLanguage": "Go",
"license": "https://github.com/BackendStack21/static-web/blob/main/LICENSE",
"codeRepository": "https://github.com/BackendStack21/static-web",
- "description": "A production-grade, blazing-fast static web file server written in Go. ~148k req/sec with fasthttp — 59% faster than Bun. Features in-memory LRU cache, TTL-aware cache expiry, HTTP/2, TLS 1.2+, gzip and brotli compression, and comprehensive security headers.",
+ "description": "A production-grade, blazing-fast static web file server written in Go. ~148k req/sec with fasthttp — 59% faster than Bun. Features in-memory LRU cache, TTL-aware cache expiry, HTTP/2, TLS 1.2+, gzip, brotli, and zstd compression, and comprehensive security headers.",
"author": {
"@type": "Person",
"name": "Rolando Santamaria Maso",
@@ -86,22 +86,22 @@
"price": "0",
"priceCurrency": "USD"
},
- "featureList": [
- "~148k req/sec — 59% faster than Bun's native static server",
- "In-memory LRU cache with ~28 ns/op lookup",
- "Startup preloading with path-safety cache pre-warming",
- "TTL-aware cache expiry with optional automatic stale-entry eviction",
- "Direct ctx.SetBody() fast path with pre-formatted headers for cache hits",
- "HTTP/2 with TLS 1.2+ and HTTP→HTTPS redirect",
- "TLS 1.2+ with AEAD cipher suites",
- "gzip and brotli compression",
- "6-step path traversal prevention",
- "Security headers (CSP, HSTS, Permissions-Policy)",
- "CORS with wildcard and per-origin modes",
- "Directory listing with breadcrumb navigation",
- "Docker and container ready",
- "Graceful shutdown with signal handling"
- ]
+ "featureList": [
+ "~148k req/sec — 59% faster than Bun's native static server",
+ "In-memory LRU cache with ~28 ns/op lookup",
+ "Startup preloading with path-safety cache pre-warming",
+ "TTL-aware cache expiry with optional automatic stale-entry eviction",
+ "Direct ctx.SetBody() fast path with pre-formatted headers for cache hits",
+ "HTTP/2 with TLS 1.2+ and HTTP→HTTPS redirect",
+ "TLS 1.2+ with AEAD cipher suites",
+ "gzip, brotli, and zstd compression",
+ "6-step path traversal prevention",
+ "Security headers (CSP, HSTS, Permissions-Policy)",
+ "CORS with wildcard and per-origin modes",
+ "Directory listing with breadcrumb navigation",
+ "Docker and container ready",
+ "Graceful shutdown with signal handling"
+ ]
},
{
"@type": "BreadcrumbList",
@@ -218,7 +218,7 @@
static-web
Production-Grade Go Static File Server
- Blazing fast, lightweight static server with in-memory LRU cache, startup preloading, HTTP/2, TLS, gzip / brotli,
+ Blazing fast, lightweight static server with in-memory LRU cache, startup preloading, HTTP/2, TLS, gzip / brotli / zstd,
and security headers baked in.
@@ -301,10 +301,10 @@ Near-Zero Alloc Hot Path
-
gzip + Brotli
+
gzip + Brotli + Zstd
- On-the-fly gzip via pooled writers, plus pre-compressed .gz/.br sidecar file
- support. Brotli preference over gzip.
+ On-the-fly gzip and zstd via pooled writers, plus pre-compressed .gz/.br/.zst sidecar file
+ support. Encoding priority: brotli > zstd > gzip.
@@ -603,7 +603,7 @@
Compress Middleware
File Handler
- Preloaded or cached → direct ctx.SetBody() fast path · brotli/gzip sidecar negotiation · miss → stat → read →
+ Preloaded or cached → direct ctx.SetBody() fast path · brotli/zstd/gzip sidecar negotiation · miss → stat → read →
cache
diff --git a/go.mod b/go.mod
index 1cced69..4d8a872 100644
--- a/go.mod
+++ b/go.mod
@@ -5,11 +5,11 @@ go 1.26
require (
github.com/BurntSushi/toml v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
+ github.com/klauspost/compress v1.18.4
github.com/valyala/fasthttp v1.69.0
)
require (
github.com/andybalholm/brotli v1.2.0 // indirect
- github.com/klauspost/compress v1.18.2 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
)
diff --git a/go.sum b/go.sum
index bcd2077..19fa8d4 100644
--- a/go.sum
+++ b/go.sum
@@ -4,9 +4,11 @@ github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwTo
github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
-github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
+github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.69.0 h1:fNLLESD2SooWeh2cidsuFtOcrEi4uB4m1mPrkJMZyVI=
github.com/valyala/fasthttp v1.69.0/go.mod h1:4wA4PfAraPlAsJ5jMSqCE2ug5tqUPwKXxVj8oNECGcw=
+github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
+github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
diff --git a/internal/cache/cache.go b/internal/cache/cache.go
index 206a6c1..a0c7a38 100644
--- a/internal/cache/cache.go
+++ b/internal/cache/cache.go
@@ -34,6 +34,8 @@ type CachedFile struct {
GzipData []byte
// BrData is the pre-compressed brotli content, or nil if unavailable.
BrData []byte
+ // ZstdData is the pre-compressed zstd content, or nil if unavailable.
+ ZstdData []byte
// ETag is the first 16 hex characters of sha256(Data), without quotes.
ETag string
// ETagFull is the pre-formatted weak ETag ready for use in HTTP headers,
@@ -150,7 +152,7 @@ func matchesImmutable(urlPath, pattern string) bool {
// totalSize returns the approximate byte footprint of the entry.
func (f *CachedFile) totalSize() int64 {
- return int64(len(f.Data)+len(f.GzipData)+len(f.BrData)) + cacheOverhead
+ return int64(len(f.Data)+len(f.GzipData)+len(f.BrData)+len(f.ZstdData)) + cacheOverhead
}
// CacheStats holds runtime statistics for the cache.
diff --git a/internal/compress/compress.go b/internal/compress/compress.go
index 7ac527a..19882af 100644
--- a/internal/compress/compress.go
+++ b/internal/compress/compress.go
@@ -11,6 +11,7 @@ import (
"sync"
"github.com/BackendStack21/static-web/internal/config"
+ "github.com/klauspost/compress/zstd"
"github.com/valyala/fasthttp"
)
@@ -230,3 +231,43 @@ func (bw *byteWriter) Write(p []byte) (int, error) {
*bw.buf = append(*bw.buf, p...)
return len(p), nil
}
+
+// zstdWriterPool pools zstd.Encoders to amortise allocation costs.
+var zstdWriterPool = sync.Pool{
+ New: func() any {
+ w, _ := zstd.NewWriter(io.Discard, zstd.WithEncoderLevel(zstd.SpeedDefault))
+ return w
+ },
+}
+
+// zstdBufPool pools bytes.Buffers used for on-the-fly zstd output.
+var zstdBufPool = sync.Pool{
+ New: func() any {
+ return &bytes.Buffer{}
+ },
+}
+
+// ZstdBytes compresses src with the default zstd level and returns the result.
+// Used during cache population to pre-compress file contents.
+func ZstdBytes(src []byte) ([]byte, error) {
+ return ZstdBytesLevel(src, zstd.SpeedDefault)
+}
+
+// ZstdBytesLevel compresses src with the specified zstd level and returns the result.
+// The level can be zstd.SpeedFastest, zstd.SpeedDefault, zstd.SpeedBetter, or zstd.SpeedBest.
+func ZstdBytesLevel(src []byte, level zstd.EncoderLevel) ([]byte, error) {
+ out := make([]byte, 0, len(src)/2+512)
+ bw := &byteWriter{buf: &out}
+
+ w, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(level))
+ if err != nil {
+ return nil, err
+ }
+ if _, err := w.Write(src); err != nil {
+ return nil, err
+ }
+ if err := w.Close(); err != nil {
+ return nil, err
+ }
+ return *bw.buf, nil
+}
diff --git a/internal/compress/compress_test.go b/internal/compress/compress_test.go
index f88901e..0143741 100644
--- a/internal/compress/compress_test.go
+++ b/internal/compress/compress_test.go
@@ -10,6 +10,7 @@ import (
"github.com/BackendStack21/static-web/internal/compress"
"github.com/BackendStack21/static-web/internal/config"
+ "github.com/klauspost/compress/zstd"
"github.com/valyala/fasthttp"
)
@@ -352,6 +353,133 @@ func TestGzipBytes_InvalidLevel(t *testing.T) {
}
}
+// ---------------------------------------------------------------------------
+// ZStandard compression tests
+// ---------------------------------------------------------------------------
+
+func TestZstdBytes(t *testing.T) {
+ src := []byte(strings.Repeat("Hello, ZStandard! ", 100))
+ compressed, err := compress.ZstdBytes(src)
+ if err != nil {
+ t.Fatalf("ZstdBytes error: %v", err)
+ }
+ if len(compressed) == 0 {
+ t.Fatal("ZstdBytes returned empty result")
+ }
+ if len(compressed) >= len(src) {
+ t.Errorf("compressed (%d) should be smaller than original (%d)", len(compressed), len(src))
+ }
+
+ // Decompress and verify.
+ br, err := zstd.NewReader(bytes.NewReader(compressed))
+ if err != nil {
+ t.Fatalf("zstd.NewReader: %v", err)
+ }
+ got, err := io.ReadAll(br)
+ if err != nil {
+ t.Fatalf("io.ReadAll: %v", err)
+ }
+ br.Close()
+ if !bytes.Equal(got, src) {
+ t.Error("decompressed content does not match original")
+ }
+}
+
+func TestZstdBytesLevel(t *testing.T) {
+ src := []byte(strings.Repeat("ZStandard compression levels ", 100))
+
+ levels := []zstd.EncoderLevel{
+ zstd.SpeedFastest,
+ zstd.SpeedDefault,
+ zstd.SpeedBetterCompression,
+ zstd.SpeedBestCompression,
+ }
+
+ for _, level := range levels {
+ t.Run(level.String(), func(t *testing.T) {
+ compressed, err := compress.ZstdBytesLevel(src, level)
+ if err != nil {
+ t.Fatalf("ZstdBytesLevel(%s) error: %v", level, err)
+ }
+ if len(compressed) == 0 {
+ t.Fatal("ZstdBytesLevel returned empty result")
+ }
+
+ // Decompress and verify roundtrip.
+ br, err := zstd.NewReader(bytes.NewReader(compressed))
+ if err != nil {
+ t.Fatalf("zstd.NewReader: %v", err)
+ }
+ got, err := io.ReadAll(br)
+ if err != nil {
+ t.Fatalf("io.ReadAll: %v", err)
+ }
+ br.Close()
+ if !bytes.Equal(got, src) {
+ t.Error("decompressed content does not match original")
+ }
+ })
+ }
+}
+
+func TestZstdBytes_EmptyInput(t *testing.T) {
+ src := []byte{}
+ compressed, err := compress.ZstdBytes(src)
+ if err != nil {
+ t.Fatalf("ZstdBytes empty error: %v", err)
+ }
+ // Empty input may produce empty or minimal output
+ if len(compressed) == 0 {
+ t.Log("ZstdBytes produced empty output for empty input (valid zstd behavior)")
+ return
+ }
+
+ // If there is output, try to decompress and verify.
+ br, err := zstd.NewReader(bytes.NewReader(compressed))
+ if err != nil {
+ t.Fatalf("zstd.NewReader: %v", err)
+ }
+ got, err := io.ReadAll(br)
+ if err != nil {
+ t.Fatalf("io.ReadAll: %v", err)
+ }
+ br.Close()
+ if len(got) != 0 {
+ t.Errorf("decompressed empty should be empty, got %d bytes", len(got))
+ }
+}
+
+func TestZstdBytes_AlreadyCompressed(t *testing.T) {
+ // Zstandard of already-compressed data should still work.
+ compressed1, err := compress.ZstdBytes([]byte(strings.Repeat("test ", 100)))
+ if err != nil {
+ t.Fatalf("first compression error: %v", err)
+ }
+
+ // Compress again
+ compressed2, err := compress.ZstdBytes(compressed1)
+ if err != nil {
+ t.Fatalf("second compression error: %v", err)
+ }
+
+ // Should be able to decompress the second compression to get the first
+ br, err := zstd.NewReader(bytes.NewReader(compressed2))
+ if err != nil {
+ t.Fatalf("zstd.NewReader: %v", err)
+ }
+ got, err := io.ReadAll(br)
+ if err != nil {
+ t.Fatalf("io.ReadAll: %v", err)
+ }
+ br.Close()
+
+ // Compare with first compression
+ if !bytes.Equal(got, compressed1) {
+ t.Errorf("double-compressed data (%d bytes) does not match single-compressed (%d bytes)",
+ len(got), len(compressed1))
+ }
+}
+
// TestAcceptsEncoding_MultipleValues covers more Accept-Encoding negotiation cases.
func TestAcceptsEncoding_MultipleValues(t *testing.T) {
cases := []struct {
diff --git a/internal/handler/file.go b/internal/handler/file.go
index f207db3..5055eec 100644
--- a/internal/handler/file.go
+++ b/internal/handler/file.go
@@ -226,10 +226,14 @@ func (h *FileHandler) negotiateEncoding(ctx *fasthttp.RequestCtx, f *cache.Cache
return f.Data, ""
}
- // Brotli preferred over gzip when available.
+ // Brotli preferred (best compression), then zstd (fastest decompression),
+ // then gzip (universally supported fallback).
if f.BrData != nil && compress.AcceptsEncoding(ctx, "br") {
return f.BrData, "br"
}
+ if f.ZstdData != nil && compress.AcceptsEncoding(ctx, "zstd") {
+ return f.ZstdData, "zstd"
+ }
if f.GzipData != nil && compress.AcceptsEncoding(ctx, "gzip") {
return f.GzipData, "gzip"
}
@@ -295,6 +299,7 @@ func (h *FileHandler) serveFromDisk(ctx *fasthttp.RequestCtx, absPath, urlPath s
compress.IsCompressible(ct) && len(data) >= h.cfg.Compression.MinSize {
cached.GzipData = loadSidecar(absPath + ".gz")
cached.BrData = loadSidecar(absPath + ".br")
+ cached.ZstdData = loadSidecar(absPath + ".zst")
}
// Generate on-the-fly gzip if no sidecar and content is compressible.
@@ -305,6 +310,14 @@ func (h *FileHandler) serveFromDisk(ctx *fasthttp.RequestCtx, absPath, urlPath s
}
}
+ // Generate on-the-fly zstd if no sidecar and content is compressible.
+ if cached.ZstdData == nil && h.cfg.Compression.Enabled &&
+ compress.IsCompressible(ct) && len(data) >= h.cfg.Compression.MinSize {
+ if zst, err := compress.ZstdBytes(data); err == nil {
+ cached.ZstdData = zst
+ }
+ }
+
// Pre-format headers for the fast serving path.
cached.InitHeaders()
cached.InitCacheControl(urlPath, h.cfg.Headers.HTMLMaxAge, h.cfg.Headers.StaticMaxAge, h.cfg.Headers.ImmutablePattern)
diff --git a/internal/handler/file_test.go b/internal/handler/file_test.go
index 8a50718..d0b99ab 100644
--- a/internal/handler/file_test.go
+++ b/internal/handler/file_test.go
@@ -869,6 +869,50 @@ func BenchmarkHandler_CacheHitGzip(b *testing.B) {
})
}
+// BenchmarkHandler_CacheHitZstd measures cache-hit throughput when the client
+// accepts zstd and on-the-fly zstd compression is generated and cached.
+func BenchmarkHandler_CacheHitZstd(b *testing.B) {
+ log.SetOutput(io.Discard)
+ b.Cleanup(func() { log.SetOutput(os.Stderr) })
+
+ root := b.TempDir()
+ content := strings.Repeat("body { color: red; } ", 100)
+ if err := os.WriteFile(filepath.Join(root, "bench.css"), []byte(content), 0644); err != nil {
+ b.Fatal(err)
+ }
+
+ cfg := &config.Config{}
+ cfg.Files.Root = root
+ cfg.Files.Index = "index.html"
+ cfg.Cache.Enabled = true
+ cfg.Cache.MaxBytes = 64 * 1024 * 1024
+ cfg.Cache.MaxFileSize = 10 * 1024 * 1024
+ cfg.Compression.Enabled = true
+ cfg.Compression.MinSize = 1
+ cfg.Compression.Level = 5
+ cfg.Compression.Precompressed = false // use on-the-fly zstd
+ cfg.Security.BlockDotfiles = true
+ cfg.Headers.StaticMaxAge = 3600
+
+ c := cache.NewCache(cfg.Cache.MaxBytes)
+ h := handler.BuildHandler(cfg, c)
+
+ // Warm — zstd variant is generated and cached on first request.
+ warmCtx := newTestCtx("GET", "/bench.css")
+ warmCtx.Request.Header.Set("Accept-Encoding", "zstd")
+ h(warmCtx)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ ctx := newTestCtx("GET", "/bench.css")
+ ctx.Request.Header.Set("Accept-Encoding", "zstd")
+ h(ctx)
+ }
+ })
+}
+
// BenchmarkHandler_CacheHitQuiet measures the cache-hit path with request logging disabled.
func BenchmarkHandler_CacheHitQuiet(b *testing.B) {
log.SetOutput(io.Discard)