diff --git a/.github/workflows/github-package.yml b/.github/workflows/github-package.yml new file mode 100644 index 0000000..cbcb92a --- /dev/null +++ b/.github/workflows/github-package.yml @@ -0,0 +1,44 @@ +name: Publish GitHub Package + +on: + workflow_dispatch: + push: + tags: + - "v*" + +permissions: + contents: read + packages: write + +jobs: + publish-github-package: + name: Publish to GitHub Packages + runs-on: ubuntu-latest + + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Set up Node.js for GitHub Packages + uses: actions/setup-node@v4 + with: + node-version: "20" + registry-url: "https://npm.pkg.github.com" + scope: "@eamon2009" + cache: "npm" + cache-dependency-path: frontend/package-lock.json + + - name: Build frontend assets + run: | + npm --prefix frontend ci + npm --prefix frontend run build + + - name: Prepare GitHub Packages metadata + run: | + npm pkg set name="@eamon2009/quadtrix" + npm pkg set publishConfig.registry="https://npm.pkg.github.com" + + - name: Publish package + run: npm publish + env: + NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..c4cba46 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,57 @@ +name: Release + +on: + push: + tags: + - "v*" + +permissions: + contents: write + +jobs: + build-release: + name: Build release artifacts + runs-on: ubuntu-latest + + steps: + - name: Check out repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + cache: "npm" + cache-dependency-path: frontend/package-lock.json + + - name: Build C++ binary + run: g++ -std=c++17 -O2 -I. 
-Iinclude -o quadtrix main.cpp + + - name: Compile Python sources + run: python -m compileall backend engine iGPU + + - name: Build frontend + working-directory: frontend + run: | + npm ci + npm run build + + - name: Package release files + run: | + tar \ + --exclude='*.pt' \ + --exclude='engine/logs' \ + --exclude='__pycache__' \ + -czf quadtrix-linux.tar.gz \ + quadtrix README.md LICENSE run.md backend engine iGPU include src data config frontend/dist + + - name: Create GitHub release + uses: softprops/action-gh-release@v2 + with: + files: quadtrix-linux.tar.gz + generate_release_notes: true diff --git a/.gitignore b/.gitignore index 92a8128..82e644e 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,10 @@ libtorch-win-shared-with-deps-2.11.0+cpu (2) frontend/node_modules/ frontend/dist/ .venv +.npm-cache/ build CMakeCache.txt +engine/fine-tune/input.txt *best_model.pt -*exe \ No newline at end of file +*.pt +*exe diff --git a/.npmignore b/.npmignore new file mode 100644 index 0000000..86f73e5 --- /dev/null +++ b/.npmignore @@ -0,0 +1,25 @@ +__pycache__/ +*.pyc +*.pyo +*.pt +*.bin +*.exe +.git/ +.github/ +.npm-cache/ +.venv/ +.vscode/ +build/ +frontend/node_modules/ +frontend/src/ +frontend/public/ +frontend/package-lock.json +frontend/package.json +frontend/postcss.config.js +frontend/tailwind.config.ts +frontend/tsconfig.json +frontend/vite.config.ts +engine/logs/ +engine/fine-tune/ +engine/fineweb_30mb.txt +engine/data/input.txt diff --git a/.vscode/c++.json b/.vscode/c++.json index 665573f..f233411 100644 --- a/.vscode/c++.json +++ b/.vscode/c++.json @@ -13,7 +13,7 @@ "_UNICODE" ], "windowsSdkVersion": "10.0.19041.0", - "compilerPath": "C:/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.34.31933/bin/Hostx64/x64/cl.exe", + "compilerPath": "", "cStandard": "c17", "cppStandard": "c++17", "intelliSenseMode": "windows-msvc-x64", diff --git a/CMakeLists.txt b/CMakeLists.txt deleted file mode 100644 index 5148a01..0000000 --- a/CMakeLists.txt 
+++ /dev/null @@ -1,52 +0,0 @@ -cmake_minimum_required(VERSION 3.10) - -project(QuadtrixCpp LANGUAGES CXX) - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_CXX_STANDARD_REQUIRED ON) -set(CMAKE_CXX_EXTENSIONS OFF) - -set(TORCH_DIR "" CACHE PATH "Path to the LibTorch root directory") -option(QUADTRIX_ENABLE_TORCH "Build the LibTorch example target" ON) - -if(MINGW) - message(FATAL_ERROR - "LibTorch on Windows is not reliably supported with MinGW GCC 6.3. " - "Use MSVC for the torch target, or configure with -DQUADTRIX_ENABLE_TORCH=OFF.") -endif() - -if(QUADTRIX_ENABLE_TORCH) - if(TORCH_DIR) - list(APPEND CMAKE_PREFIX_PATH "${TORCH_DIR}") - endif() - - find_package(Torch REQUIRED) - - add_executable(quadtrix_torch_cpp - src/torch_example.cpp - ) - - target_include_directories(quadtrix_torch_cpp PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/include - ) - - target_link_libraries(quadtrix_torch_cpp PRIVATE ${TORCH_LIBRARIES}) - set_property(TARGET quadtrix_torch_cpp PROPERTY CXX_STANDARD 17) - set_property(TARGET quadtrix_torch_cpp PROPERTY CXX_STANDARD_REQUIRED ON) - - if(MSVC) - target_compile_options(quadtrix_torch_cpp PRIVATE /W4 /EHsc) - else() - target_compile_options(quadtrix_torch_cpp PRIVATE -Wall -Wextra -Wpedantic) - endif() - - if(WIN32) - file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll") - add_custom_command(TARGET quadtrix_torch_cpp POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${TORCH_DLLS} - $ - ) - endif() -endif() diff --git a/README.md b/README.md index 8c5685c..2f5dea3 100644 --- a/README.md +++ b/README.md @@ -1,565 +1,971 @@ # Quadtrix.cpp -

-image +Quadtrix.cpp is a local language model project with several execution paths: - - - -

+- A dependency-free C++17 transformer implementation with manual forward and backward passes. +- A PyTorch training and inference path for faster experimentation on CPU, CUDA, or supported accelerator backends. +- A FastAPI middleware layer for chat sessions, health checks, backend selection, and feedback. +- A React + TypeScript frontend for local chat, settings, session history, and model status. +- Optional package/CLI support through `bin/quadtrix.js`. -[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) -[[Release](https://github.com/Eamon2009/Quadtrix.cpp/releases/tag/v1.01)] -## Quadtrix.cpp a autoregressive language model in two variants: +> [!IMPORTANT] +> Please be aware that several commands listed in this documentation—specifically those involving file paths and directory navigation—should not be directly copied and pasted into your terminal. Because file structures and path syntax (such as / vs \) vary significantly across operating systems like Windows, macOS, and Linux, you must manually adjust these arguments to match your local environment. Ensure you verify your current working directory and replace any placeholder paths with the absolute or relative path specific to your machine to avoid execution errors. -CPU Implementation (C++17): Fully custom from-scratch implementation with zero external dependencies. Features a hand-rolled Tensor class with manual gradient tracking, explicit forward/backward passes through transformer blocks (multi-head self-attention, feedforward MLPs, layer normalization), AdamW optimizer with momentum and weight decay, and cross-entropy loss. All operations—matrix multiplications, softmax, GELU activations, attention scoring—are raw C++ arithmetic with no framework overhead. +The project is designed as a technical learning implementation. 
The C++ path exposes the transformer internals directly: tensor operations, attention, layer normalization, cross-entropy, analytical gradients, AdamW, checkpointing, and autoregressive generation. +## v1.1.0 +run_20260508_110726 -GPU Implementation (PyTorch + CUDA): Architecturally identical transformer (same layer configs, attention heads, embedding dimensions) but delegates tensor operations to PyTorch's CUDA backend for GPU parallelization. The model structure remains unchanged; only the compute substrate shifts from CPU to GPU tensor cores. +--- +run_20260430_192930 -Training Pipeline: Both versions follow standard autoregressive training: tokenize input text → forward pass through embedding + N transformer blocks → compute cross-entropy loss on next-token predictions → backpropagation to compute gradients → AdamW weight updates. Repeated over batches until convergence. +--- +## Contents + +- [System Architecture](#system-architecture) +- [Repository Structure](#repository-structure) +- [Requirements](#requirements) +- [Setup](#setup) +- [Run the Project](#run-the-project) +- [C++ Backend](#c-backend) +- [PyTorch Backend](#pytorch-backend) +- [FastAPI Backend](#fastapi-backend) +- [Frontend](#frontend) +- [NPM CLI](#npm-cli) +- [Configuration](#configuration) +- [API Reference](#api-reference) +- [Training Details](#training-details) +- [Reports and Results](#reports-and-results) +- [Troubleshooting](#troubleshooting) +- [License](#license) + +## System Architecture + +Quadtrix has four main layers: + +```text +User Interface + React + Vite frontend + PWA assets and service worker + +API Layer + FastAPI app in backend/ + Session storage, request validation, CORS, health checks + +Model Backends + C++ executable: main.cpp + include/ + PyTorch checkpoint: engine/inference.py + engine/best_model.pt or engine/best_model .pt + +Training and Data + C++ character-level training from data/input.txt + PyTorch GPT-2 BPE training from engine/input.txt or 
QUADTRIX_TRAIN_DATA +``` -GPU Extension Limitation: A native CUDA implementation (custom kernels for matrix ops, attention, etc.) requires access to NVIDIA hardware for development and testing, currently unavailable. The PyTorch version serves as a GPU-accelerated alternative without requiring hand-written CUDA -image +### Backend Modes +| Backend | Path | Purpose | +|---|---|---| +| C++ CPU | `main.cpp`, `include/`, `config/` | Manual transformer training, generation, terminal chat | +| PyTorch | `engine/main.py`, `engine/inference.py` | Faster training/inference with `torch` and `tiktoken` | +| iGPU | `iGPU/` | Experimental integrated-GPU inference/training scripts | +| API | `backend/main.py` | FastAPI middleware used by the frontend | +| Web UI | `frontend/` | Local chat interface and PWA shell | +## Repository Structure ---- -# Leaderboard -| s.no | time | val_bpb | CORE | Description | Date | Contributors | -|---|-------------|---------|------|-------------|------|--------------| -| 0 | 39.4 min | 1.3145 | 0.82M | Quadtrix CPU baseline, small data (200K chars), fragmented output | 2026 | @Eamon2009 | -| 1 | 61.3 min | 0.7176 | 10.82M |Quadtrix Colab large-scale run, coherent paragraphs, strong convergence | 2026 | @Eamon2009 | -| 2 | 6.1 min | 0.9250 | 1.99M | Quadtrix T4 optimized run, fast training, stable learning, basic coherence | 2026 | @Eamon2009 | -| 3 | 76.2 min | 1.6371 | ~0.82M | Quadtrix.cpp Extended CPU training (3000 iters) | 2026 | @Eamon2009 | ---- +This is the current project layout, excluding generated caches such as `.git/`, `.venv/`, `.npm-cache/`, and build outputs. 
-### Hardware Execution Backends +```text +Quadtrix.cpp/ +|-- .github/ +| |-- ISSUE_TEMPLATE/ +| |-- workflows/ +| | |-- ci.yml +| | `-- release.yml +| |-- dependabot.yml +| `-- pull_request_template.md +|-- backend/ +| |-- middleware/ +| | |-- error_handler.py +| | |-- logging.py +| | `-- __init__.py +| |-- router/ +| | |-- chat.py +| | |-- feedback.py +| | |-- health.py +| | |-- sessions.py +| | `-- __init__.py +| |-- README.md +| |-- config.py +| |-- inference.py +| |-- main.py +| |-- models.py +| |-- requirements.txt +| |-- server.py +| `-- session_store.py +|-- bin/ +| `-- quadtrix.js +|-- config/ +| `-- config.h +|-- data/ +| |-- data_set.py +| `-- input.txt +|-- engine/ +| |-- data/ +| | |-- data_set.py +| | `-- input.txt +| |-- fine-tune/ +| | |-- chat.py +| | |-- data-set.py +| | `-- main.py +| |-- logs/ +| |-- engine.c +| |-- export_weights.py +| |-- fineweb_30mb.txt +| |-- fineweb_dataset.py +| |-- inference.py +| `-- main.py +|-- frontend/ +| |-- public/ +| | |-- icon.svg +| | |-- manifest.webmanifest +| | `-- sw.js +| |-- src/ +| | |-- api/ +| | |-- components/ +| | |-- hooks/ +| | |-- store/ +| | |-- types/ +| | |-- utils/ +| | |-- App.tsx +| | |-- index.css +| | |-- main.tsx +| | `-- registerServiceWorker.ts +| |-- index.html +| |-- manifest.webmanifest +| |-- package.json +| |-- tailwind.config.ts +| |-- tsconfig.json +| |-- vite.config.ts +| `-- sw.js +|-- gpu/ +| |-- dataloader.h +| `-- model.h +|-- iGPU/ +| |-- inference.py +| `-- main.py +|-- include/ +| |-- attention.h +| |-- backward.h +| |-- block.h +| |-- dataloader.h +| |-- embedding.h +| |-- feedforward.h +| |-- gpt.h +| |-- layernorm.h +| |-- linear.h +| |-- tensor.h +| `-- torch_bridge.h +|-- model/ +| |-- Cmakelists.txt +| `-- export_tokenizer.py +|-- scripts/ +| `-- build_torch.ps1 +|-- src/ +| |-- torch_example.cpp +| `-- torch_main.cpp +|-- contributing.md +|-- LICENSE +|-- main.cpp +|-- package.json +|-- quadtrix.exe +|-- quadtrix_training_report.png +|-- README.md +|-- run.md +`-- 
SECURITY.md +``` -| Device | Technical Execution Pathway | -| :--- | :--- | -| **CPU** | Utilizes vectorized instructions (AVX/SSE) and multi-threading for sequential or small-batch inference. | -| **CUDA** | Leverages NVIDIA’s parallel computing platform for high-throughput training and inference on discrete GPUs. | -| **iGPU** | Targets Integrated GPUs (e.g., Intel Iris, AMD Radeon, or Apple Silicon M-series) via backends like **Metal (MPS)**, **DirectML**, or **oneAPI/SYCL**, optimizing for power-efficient local execution. | -## What is this? +### Important Files + +| File | Role | +|---|---| +| `main.cpp` | C++ entry point for training, generation, and terminal chat | +| `config/config.h` | C++ hyperparameters, data path defaults, checkpoint path defaults | +| `include/tensor.h` | Custom tensor operations used by the C++ model | +| `include/gpt.h` | GPT language model implementation and generation path | +| `include/backward.h` | Analytical backpropagation and AdamW optimizer state | +| `data/input.txt` | Default C++ training corpus | +| `engine/main.py` | PyTorch training script | +| `engine/inference.py` | PyTorch checkpoint loading and generation | +| `backend/main.py` | FastAPI application entry point | +| `backend/inference.py` | Backend adapter for PyTorch and C++ model services | +| `frontend/src/` | React chat application | +| `bin/quadtrix.js` | Node CLI wrapper for setup, chat, and training | + +## Requirements + +### Core Requirements + +| Tool | Version | Used For | +|---|---:|---| +| Python | 3.10+ recommended | Backend, PyTorch training, PyTorch inference | +| Node.js | 18+ | Frontend and CLI | +| npm | bundled with Node.js | Frontend dependencies | +| C++ compiler | C++17 support | Building `main.cpp` | +| Git | any recent version | Cloning and source control | + +### Python Dependencies + +The backend installs: + +```text +fastapi +uvicorn[standard] +pydantic +pydantic-settings +httpx +redis +torch +tiktoken +``` -Quadtrix.cpp is a transformer 
learning laboratory. Write your own backprop, debug attention matrices, export to bare-metal C. If you've read the Attention paper and want to *implement* it rather than just call `model.fit()`, this is for you. +These are declared in `backend/requirements.txt`. -**Philosophy**: Frameworks hide the fundamentals. This project reveals them. Every gradient, every checkpoint, every matrix multiply lives in code you can step through with a debugger. +### C++ Dependencies -**Parallel tracks**: Native C++ training path (educational), PyTorch path (faster iteration), DirectML path (Windows iGPU), pure C inference (deployment), web frontend (chat UI). +The native C++ path has no third-party runtime dependency. It builds from: -## Quick Start +```text +main.cpp +config/config.h +include/*.h +``` -```bash -# Native C++ path - train from scratch -g++ -std=c++17 -O2 -I. -Iinclude -o quadtrix main.cpp -./quadtrix data/input.txt +## Setup -# PyTorch path - faster experimentation -pip install torch tiktoken numpy -python engine/main.py +The commands below use PowerShell from the repository root: -# Interactive chat -./quadtrix data/input.txt --chat -python engine/inference.py +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp ``` -## Overview +### 1. Create a Python Virtual Environment -**Quadtrix** is a self-contained C++17 implementation of a GPT-style transformer language model trained at the character level. It implements the full pipeline — tokenisation, forward pass, analytical backpropagation, and autoregressive generation — in a single dependency-free codebase. +```powershell +python -m venv .venv +.\.venv\Scripts\python.exe -m pip install --upgrade pip +``` -The project is a faithful port of the Python/PyTorch training loop written in C++ with hand-derived gradients for every layer: linear projections, multi-head causal self-attention, layer normalisation, feed-forward blocks, softmax, ReLU, dropout, and cross-entropy loss. +### 2. 
Install Backend and PyTorch Dependencies -**Training run v1.0** used a 31M-character children's story corpus, reaching a validation loss of **1.6371** in 76 minutes on CPU. +```powershell +cd backend +..\.venv\Scripts\python.exe -m pip install -r requirements.txt +cd .. +``` ---- +### 3. Install Frontend Dependencies -## Architecture - -Quadtrix uses a decoder-only transformer (GPT-2 style) with pre-layer-normalisation residual blocks. - -``` -Input tokens [B, T] - │ - ▼ -Token Embedding [vocab_size × n_embd] - + -Position Embedding [block_size × n_embd] - │ - ▼ -┌─────────────────────────────────┐ -│ Transformer Block × n_layer │ -│ │ -│ ┌──────────────────────────┐ │ -│ │ LayerNorm (pre-LN) │ │ -│ └──────────┬───────────────┘ │ -│ ▼ │ -│ ┌──────────────────────────┐ │ -│ │ Multi-Head Causal Attn │ │ -│ │ n_head × head_size │ │ -│ │ + causal mask │ │ -│ │ + dropout │ │ -│ └──────────┬───────────────┘ │ -│ ▼ (residual +) │ -│ ┌──────────────────────────┐ │ -│ │ LayerNorm (pre-LN) │ │ -│ └──────────┬───────────────┘ │ -│ ▼ │ -│ ┌──────────────────────────┐ │ -│ │ Feed-Forward MLP │ │ -│ │ Linear → ReLU → Linear │ │ -│ │ (4× expansion) +dropout │ │ -│ └──────────┬───────────────┘ │ -│ ▼ (residual +) │ -└─────────────────────────────────┘ - │ - ▼ -LayerNorm → Linear → Logits [B, T, vocab_size] - │ - ▼ -Cross-Entropy Loss / Softmax + Multinomial Sampling -``` - -### Hyperparameters — v1.0 - -| Parameter | Value | Notes | -|------------------|---------|--------------------------------| -| `batch_size` | 4 | Sequences per step | -| `block_size` | 64 | Context window (tokens) | -| `n_embd` | 128 | Embedding dimension | -| `n_head` | 4 | Attention heads | -| `n_layer` | 4 | Transformer blocks | -| `dropout` | 0.2 | Applied to attn weights + proj | -| `learning_rate` | 3e-4 | AdamW, β₁=0.9, β₂=0.999 | -| `max_iters` | 3000 | | -| `eval_interval` | 200 | | -| **Total params** | **0.83 M** | (826,985) | +```powershell +cd frontend +npm.cmd install +cd .. 
+``` ---- +Use `npm.cmd` on Windows PowerShell if direct `npm` execution is blocked by PowerShell execution policy. + +### 4. Build the Frontend -## Training +```powershell +cd frontend +npm.cmd run build +cd .. +``` -The model was trained on a 31.4M-character corpus of short children's stories (`data/input.txt`), split 90/10 into train and validation sets. +### 5. Build the C++ Executable -| Set | Tokens | -|------------|-------------| -| Train | 28,311,139 | -| Validation | 3,145,683 | -| Vocabulary | 105 characters | +If `quadtrix.exe` already exists, this step is optional. To rebuild: -Training used full **analytical backpropagation** — hand-derived gradients through every operator (cross-entropy → lm_head → layernorm → MHA → FFN → embeddings) without any automatic differentiation library. +```powershell +g++ -std=c++17 -O2 -I. -Iinclude -o quadtrix.exe main.cpp +``` -The gradient computation follows this chain: +For extra CPU optimization on GCC/Clang: +```powershell +g++ -std=c++17 -O3 -march=native -I. -Iinclude -o quadtrix.exe main.cpp ``` -dLoss/dLogits → dW_lmhead → d(LayerNorm_f) - → for each Block (reverse): - d(FFN residual) → d(LN2) → d(fc2) → d(ReLU) → d(fc1) - d(MHA residual) → d(LN1) → d(proj) - → for each Head: - d(wei@V) → d(softmax) → d(scale) → d(Q@Kᵀ) - → d(Wk), d(Wq), d(Wv) - → d(tok_emb), d(pos_emb) + +## Run the Project + +Quadtrix UI screenshot + +### Option A: Run PyTorch Chat in the Web UI + +This is the simplest web path when a PyTorch checkpoint is available. 
+ +Terminal 1: + +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\backend +..\.venv\Scripts\python.exe -m uvicorn main:app --host 127.0.0.1 --port 3001 ``` ---- +Terminal 2: -## Training Log - -Training run: **Quadtrix v1.0** - -``` ------------------------------------------------------------- - Quadtrix v1.0 2026 - iter train val elapsed eta - ────────────────────────────────────────────────────── - 0/3000 4.6523 4.6570 15s — [saved] - 200/3000 2.4876 2.4478 427s 5976s [saved] - 400/3000 2.2965 2.3334 783s 5091s [saved] - 600/3000 2.2971 2.2572 1105s 4418s [saved] - 800/3000 2.2424 2.2018 1331s 3660s [saved] - 1000/3000 2.1570 2.2009 1569s 3138s [saved] - 1200/3000 2.0914 2.0577 1791s 2687s [saved] - 1400/3000 1.9575 2.0151 2013s 2301s [saved] - 1600/3000 1.9409 1.9532 2317s 2028s [saved] - 1800/3000 1.8233 1.8250 2673s 1782s [saved] - 2000/3000 1.7386 1.7724 2999s 1500s [saved] - 2200/3000 1.6850 1.7256 3353s 1219s [saved] - 2400/3000 1.7298 1.7403 3697s 924s - 2600/3000 1.7204 1.6680 4031s 620s [saved] - 2800/3000 1.5717 1.6471 4347s 310s [saved] - 3000/3000 1.7055 1.6371 4571s 0s [saved] - ────────────────────────────────────────────────────── - - done - | time : 4571.1s (76.2 min) - | best val : 1.6371 - | weights : best_model.bin ------------------------------------------------------------- -``` - -The model crosses 2.0 val loss around iteration 1200 and continues to improve steadily to **1.6371** — a drop of 3.02 nats from random initialisation. +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\frontend +npm.cmd run dev +``` ---- +Open: -## Inference & Sample Outputs +```text +http://localhost:5173 +``` -Weights are loaded from `best_model.bin` and generation proceeds autoregressively: at each step the model consumes the last `block_size` tokens, computes logits over the vocabulary, applies softmax, and samples the next token from the resulting distribution. +Select the `.pt` or PyTorch backend in the UI. 
The FastAPI app loads the checkpoint through `engine/inference.py`. -### Chat Transcript +### Option B: Run C++ Terminal Chat -> The following samples were produced by the trained v1.0 checkpoint. -> The model operates at the character level and was trained on children's stories — expect emergent narrative structure, invented words, and short-form prose. +The C++ executable supports terminal chat directly: ---- +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp +.\quadtrix.exe data\input.txt --chat +``` -**You:** hi how are you +Set the number of generated tokens per answer: -**Quadtrix:** +```powershell +.\quadtrix.exe data\input.txt --chat --chat-tokens 300 ``` -said. You big his's reat cably hawere cast wen. Anted the on feer -the girl the heri beg that's appy and happecing apy his. Whe mommmy -had alowith of thad very the rand a coove the ses and the + +The C++ chat path requires `best_model.bin` unless `GPT_MODEL_PATH` points to another checkpoint. + +### Option C: Run C++ Generation + +```powershell +.\quadtrix.exe data\input.txt --generate ``` ---- +Generation streams tokens until interrupted with `Ctrl+C`. -**You:** what is life +### Option D: Train the C++ Model -**Quadtrix:** +```powershell +.\quadtrix.exe data\input.txt ``` -st happe. It happ a liked back abp happy thing flongs way. -"Lily lood take maked a fiside apie?" Tom and abed Timm. -A na starhe of her that they was is they," that and Timmy -flauch seer aing time tri + +Training writes the best checkpoint to: + +```text +best_model.bin ``` ---- +### Option E: Train the PyTorch Model + +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp +.\.venv\Scripts\python.exe engine\main.py +``` -**You:** Timmy is a +The PyTorch script looks for `engine/input.txt` by default. 
If that file is not present, point the script at an existing corpus with `QUADTRIX_TRAIN_DATA`: -**Quadtrix:** +```powershell +$env:QUADTRIX_TRAIN_DATA="C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\data\input.txt" +.\.venv\Scripts\python.exe engine\main.py ``` -bog the scated justo prove the bret you. Timmy nevery some -the gecid. Her neplay to bet starked a way, that litked cliend. -They shat corsee get. -Ben it lows mured she happlay mom and had says." -Tom + +The PyTorch training script saves its checkpoint as: + +```text +best_model.pt ``` ---- +## C++ Backend + +The C++ implementation is the educational core of the project. + +### C++ Features -**You:** Once upon a time +- Character-level tokenizer built from the input corpus. +- Train/validation split through `DataLoader`. +- Decoder-only transformer architecture. +- Token and positional embeddings. +- Multi-head causal self-attention. +- Feed-forward MLP blocks. +- Pre-layer normalization and residual connections. +- Cross-entropy loss. +- Manual analytical backward pass. +- AdamW optimization. +- Checkpoint saving/loading. +- Autoregressive generation. +- Terminal chat mode. -**Quadtrix:** +### C++ Runtime Arguments + +```text +quadtrix.exe [data_path] [--generate] [--chat] [--chat-tokens N] ``` -, and said askiced and so owas said sri -The his brickerys and stew hhat and saw and stark a din't -She stingry and asked day. Timmy watch and played to cones. 
-It eve smors and he skiseed auch pived el make + +| Argument | Description | +|---|---| +| `data_path` | Plain-text corpus used to build the tokenizer and train/validation split | +| `--generate` | Load weights and continuously generate text | +| `--chat` | Load weights and start interactive terminal chat | +| `--chat-tokens N` | Set maximum generated tokens per chat response | + +### C++ Environment Variables + +| Variable | Default | Description | +|---|---|---| +| `GPT_DATA_PATH` | `data/input.txt` | Overrides the default C++ data file | +| `GPT_MODEL_PATH` | `best_model.bin` | Overrides the model checkpoint path | + +Example: + +```powershell +$env:GPT_MODEL_PATH="C:\models\quadtrix-best.bin" +.\quadtrix.exe data\input.txt --chat ``` ---- +## PyTorch Backend -### Extended Free Generation +The PyTorch path mirrors the transformer idea with `torch`, `torch.nn`, and `tiktoken`. +### Training + +```powershell +.\.venv\Scripts\python.exe engine\main.py ``` -usidpy the nis the girl Mian in the she foll pot stwed note ful to -snid sids funded araving many to them big strear birng and That the -momn,'s saw and icked aging for the cor dad and spzy ited thankeft -the beecia rear who rah gight sad, and and help ceated and blut the -waye! Shearted started, "Yes. I wout she feare scal the dingred asked -not dearve had s g. It was hat startedry like his -in the the was give grin Lily. -Tim ould and hoppen rand tce to the - faind her time. As and Ben't -the sise askep. It every and sticked Lia loshe wentimed toohld the -cookes and he gagayss in hen greveryby. +Important defaults in `engine/main.py`: + +| Parameter | Value | +|---|---:| +| `batch_size` | 16 | +| `block_size` | 32 | +| `max_iters` | 10000 | +| `eval_interval` | 10 | +| `learning_rate` | 1e-3 | +| `n_embd` | 64 | +| `n_head` | 4 | +| `n_layer` | 4 | +| `dropout` | 0.1 | +| Tokenizer | GPT-2 BPE through `tiktoken` | -One day, here stomed trreave one up in Annamecy it noted Mise with -that make tret a like. 
+### CLI Inference -Tom." -T'vey, ""As smie, a, "I's wurre and not make day, but tway it? -Lily. The stach, says eveere they am and then a to happprosh apper, -and his plh? That you obo. The garded rike, nothis to fring they -is his ared to shing itsed and old neved the pretoy beard shappy -hingse they him at happy his stroughts have nex's by. +Interactive chat: + +```powershell +.\.venv\Scripts\python.exe engine\inference.py --checkpoint "engine\best_model.pt" ``` -### Observations +Single prompt: -- The model has learned basic English word-level shapes after 3000 iterations despite training at the character level. -- Names (`Timmy`, `Lily`, `Tom`, `Ben`, `Mia`) appear consistently — likely the most frequent tokens in the story corpus. -- Dialogue punctuation (`"`, `'`, `,`) is placed plausibly. -- Sentence-level structure emerges: the model produces recognisable story beats ("One day,", "It was", "and said"). -- At 0.83M parameters and block_size=64, the context window is narrow; longer-range coherence will improve with larger `block_size` and more iterations. +```powershell +.\.venv\Scripts\python.exe engine\inference.py --checkpoint "engine\best_model.pt" --prompt "Once upon a time" --max-new-tokens 100 --temperature 1.0 +``` ---- +Available inference flags: + +| Flag | Description | +|---|---| +| `--checkpoint` | Path to `.pt` checkpoint | +| `--prompt` | Generate once instead of starting interactive chat | +| `--max-new-tokens` | Maximum generated tokens | +| `--temperature` | Sampling temperature | +| `--top-k` | Optional top-k sampling cutoff | + +## FastAPI Backend -## Main Structure +The production-style API lives in `backend/main.py`. 
+Start it with: + +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\backend +..\.venv\Scripts\python.exe -m uvicorn main:app --host 127.0.0.1 --port 3001 ``` -Quadtrix.cpp/ -├── config/ -│ └── config.h # All hyperparameters — edit here to retrain -├── include/ -│ ├── tensor.h # CPU float tensor: 2D/3D ops, matmul, softmax, etc. -│ ├── linear.h # nn.Linear equivalent (weight + optional bias) -│ ├── embedding.h # nn.Embedding (token + position lookup tables) -│ ├── layernorm.h # nn.LayerNorm (γ/β, ε=1e-5) -│ ├── attention.h # Head (causal mask, scaled dot-product) + MultiHeadAttention -│ ├── feedforward.h # FeedForward: Linear → ReLU → Linear → Dropout -│ ├── block.h # Transformer Block (pre-LN, dual residual) -│ ├── gpt.h # GPTLanguageModel, cross_entropy, generate() -│ ├── dataloader.h # Char tokeniser (stoi/itos), train/val split, get_batch() -│ └── backward.h # Full analytical backprop + AdamW state -├── data/ -│ └── input.txt # Training corpus (plain text) -├── main.cpp # Training pipeline -└── README.md +The FastAPI backend provides: + +- `POST /api/chat` +- `GET /api/health` +- `GET /api/stats` +- session creation/listing/deletion +- message persistence through in-memory or Redis-backed session storage +- feedback capture +- CORS configuration for the frontend +- PyTorch checkpoint loading through `backend/inference.py` + +### Backend Environment + +Create `backend/.env` if you want to override defaults: + +```text +API_PORT=3001 +CORS_ORIGINS=http://localhost:5173 +REDIS_URL= +LOG_LEVEL=INFO +MAX_SESSIONS=1000 +SESSION_TTL_HOURS=24 +CPP_SERVER_URL=http://localhost:8080 +TORCH_CHECKPOINT_PATH=../engine/best_model.pt +REQUEST_TIMEOUT_SECONDS=60 ``` ---- +The checked-in backend default is `../engine/best_model .pt` with a space before `.pt`. If your checkpoint is named `best_model.pt`, set `TORCH_CHECKPOINT_PATH=../engine/best_model.pt` in `backend/.env`. 
+ +Note: the FastAPI C++ adapter expects a C++-compatible HTTP service at `CPP_SERVER_URL` with `/health` and `/generate`. The current `main.cpp` source provides terminal training/chat/generation. Use the PyTorch backend for the web UI unless you have a compatible C++ HTTP service running. -## Building +## Frontend -**Requirements:** GCC ≥ 9 or Clang ≥ 10, C++17, no external dependencies. +The frontend is a React + TypeScript + Vite app. -```bash -# Compile -g++ -std=c++17 -O2 -I. -o quadtrix main.cpp +### Development Server -# Or use Make -make +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\frontend +npm.cmd run dev +``` -# Train on your own text -./quadtrix data/input.txt +Open: -# Generate only (loads best_model.bin) -./quadtrix data/input.txt --generate +```text +http://localhost:5173 ``` ---- +### Production Build + +```powershell +npm.cmd run build +``` + +### Preview Production Build + +```powershell +npm.cmd run preview +``` + +Open the preview URL, usually: + +```text +http://localhost:4173 +``` + +### PWA Files + +The frontend includes installable web app files: + +```text +frontend/manifest.webmanifest +frontend/sw.js +frontend/public/manifest.webmanifest +frontend/public/sw.js +frontend/src/registerServiceWorker.ts +``` + +Install from Chrome or Edge after running the production preview. The installed app still needs the FastAPI backend running at the configured API URL. + +## NPM CLI + +The root package exposes a `quadtrix` CLI through `bin/quadtrix.js`. 
+ +```text +quadtrix chat [--api-port 3001] [--web-port 5173] [--no-open] +quadtrix train --backend cpp [--data data/input.txt] +quadtrix train --backend python +quadtrix setup +``` + +From the repository root: + +```powershell +npm.cmd install +npm.cmd run build:frontend +node bin\quadtrix.js setup +``` + +Run the packaged chat wrapper: + +```powershell +node bin\quadtrix.js chat --api-port 3001 --web-port 5173 +``` + +Train through the wrapper: + +```powershell +node bin\quadtrix.js train --backend cpp --data data\input.txt +node bin\quadtrix.js train --backend python --data data\input.txt +``` ## Configuration -All hyperparameters live in `config/config.h`. Rebuild after any change. +### C++ Configuration + +Edit `config/config.h` and rebuild the C++ executable. ```cpp -// config/config.h +static const int BATCH_SIZE = 4; +static const int BLOCK_SIZE = 64; +static const int MAX_ITERS = 3000; +static const int EVAL_INTERVAL = 200; +static const float LEARNING_RATE = 3e-4f; +static const int EVAL_ITERS = 10; +static const int N_EMBD = 128; +static const int N_HEAD = 4; +static const int N_LAYER = 4; +static const float DROPOUT = 0.2f; +``` -static const int BATCH_SIZE = 4; // sequences per gradient step -static const int BLOCK_SIZE = 64; // context window length -static const int MAX_ITERS = 3000; // total training iterations -static const int EVAL_INTERVAL = 200; // evaluate every N steps -static const float LEARNING_RATE = 3e-4f; // AdamW learning rate -static const int EVAL_ITERS = 10; // batches per eval estimate -static const int N_EMBD = 128; // embedding dimension -static const int N_HEAD = 4; // number of attention heads -static const int N_LAYER = 4; // number of transformer blocks -static const float DROPOUT = 0.2f; // dropout probability +### C++ Model Shape + +| Parameter | Meaning | +|---|---| +| `BATCH_SIZE` | Number of sequences per gradient step | +| `BLOCK_SIZE` | Context length in tokens | +| `N_EMBD` | Embedding width | +| `N_HEAD` | Number of 
attention heads | +| `N_LAYER` | Number of transformer blocks | +| `DROPOUT` | Dropout probability during training | +| `LEARNING_RATE` | AdamW learning rate | +| `MAX_ITERS` | Total training iterations | +| `EVAL_INTERVAL` | Evaluation/checkpoint interval | + +### Scaling Guide + +| Goal | Change | +|---|---| +| Better local coherence | Increase `BLOCK_SIZE` | +| Higher model capacity | Increase `N_EMBD` and `N_LAYER` | +| Faster CPU runs | Use fewer layers or lower embedding width | +| Faster optimized build | Compile with `-O3 -march=native` | +| More stable loss estimates | Increase `EVAL_ITERS` | + +## API Reference + +Base URL: + +```text +http://localhost:3001 ``` -### Scaling guide +### Health + +```text +GET /api/health +``` -| Target | Suggestion | -|------------------|---------------------------------------------------------| -| Better coherence | ↑ `block_size` (256–512), ↑ `n_embd` (256+) | -| Faster training | ↑ `batch_size`, compile with `-O3 -march=native` | -| Smaller model | ↓ `n_layer` (2), ↓ `n_embd` (64) | -| More parameters | ↑ `n_embd` (512), ↑ `n_layer` (6–8) | +Example: ---- +```powershell +Invoke-RestMethod http://localhost:3001/api/health +``` -## Design Notes +### Stats -### Why C++17, no dependencies? +```text +GET /api/stats +``` -The goal is full transparency. Every multiply-accumulate, every softmax row, every gradient derivation is readable in the source. There is no framework between the math and the metal. +Example: -### Analytical backprop +```powershell +Invoke-RestMethod http://localhost:3001/api/stats +``` -Rather than automatic differentiation, Quadtrix implements explicit backward passes for each operator. The derivations follow the standard formulations: +### Chat -- **Cross-entropy:** `d_logits = softmax(logits) − one_hot(target)` scaled by `1/BT` -- **Linear:** `dX = dOut @ Wᵀ`, `dW += Xᵀ @ dOut`, `db += Σ dOut` -- **LayerNorm:** Ba et al. 
(2016) three-term formula via saved `μ`, `σ⁻¹`, `x̂` -- **Softmax:** `d_pre[i] = s[i] * (d[i] − Σⱼ s[j] d[j])` -- **ReLU:** `dX[i] = dOut[i] if pre_relu[i] > 0 else 0` -- **Attention:** product rule through `Q @ Kᵀ`, causal mask zeros upper-triangle grads -- **Embeddings:** scatter-add for tokens, batch-sum for positions +```text +POST /api/chat +``` -### Causal masking +PyTorch backend example: + +```powershell +Invoke-RestMethod ` + -Uri http://localhost:3001/api/chat ` + -Method Post ` + -ContentType "application/json" ` + -Body '{ + "session_id": null, + "prompt": "Once upon a time", + "max_tokens": 100, + "temperature": 1.0, + "stream": false, + "model_backend": "torch" + }' +``` -The upper-triangular mask is applied before softmax by setting future positions to `-1e30`. During backprop these positions receive zero gradient (the `-inf` entries have zero softmax output, so `s[i] * (...)` = 0). +C++ backend example, when a compatible C++ HTTP service is available: + +```powershell +Invoke-RestMethod ` + -Uri http://localhost:3001/api/chat ` + -Method Post ` + -ContentType "application/json" ` + -Body '{ + "session_id": null, + "prompt": "Once upon a time", + "max_tokens": 100, + "temperature": 1.0, + "stream": false, + "model_backend": "cpp" + }' +``` + +### Sessions -### Dropout +```text +GET /api/sessions +POST /api/sessions +DELETE /api/sessions/{id} +GET /api/sessions/{id}/messages +``` -Both the attention weight matrix and the projection output have independent dropout masks sampled during each forward pass. The same masks are stored and reused in the backward pass (`d = d * mask / (1 - p)`). +### Feedback ---- -## Training Metrics +```text +POST /api/feedback +``` + +## Training Details + +Quadtrix uses a decoder-only transformer in the GPT family. 
+ +```text +Input token IDs [B, T] + -> token embedding + -> positional embedding + -> transformer block x N + -> layer norm + -> masked multi-head self-attention + -> residual add + -> layer norm + -> feed-forward MLP + -> residual add + -> final layer norm + -> language-model head + -> logits [B, T, vocab_size] + -> cross-entropy loss +``` + +### C++ Manual Backpropagation + +The C++ path implements gradients explicitly through: + +- Cross-entropy and softmax. +- Final projection layer. +- Final layer normalization. +- Feed-forward MLP layers. +- ReLU activation. +- Attention projection. +- Scaled dot-product attention. +- Causal mask. +- Query, key, and value projections. +- Token and positional embeddings. + +Gradient flow: + +```text +dLoss/dLogits + -> lm_head + -> final layer norm + -> each transformer block in reverse + -> feed-forward residual branch + -> layer norm 2 + -> MLP fc2, activation, fc1 + -> attention residual branch + -> layer norm 1 + -> output projection + -> each attention head + -> attention weights @ value + -> softmax + -> scaled QK^T + -> query/key/value projections + -> token embedding gradients + -> position embedding gradients +``` -The training report visualizes three critical dynamics: +### Causal Masking -**Loss curves** (left panel): Cross-entropy decreases from 4.5 to 1.6 over 3000 iterations. Training and validation losses track closely, indicating effective learning without severe overfitting. +Future tokens are masked before softmax so each position can only attend to current and previous positions. Masked entries receive zero useful probability mass and therefore do not contribute meaningful gradient during the attention backward pass. -**Wall-clock efficiency** (middle panel): Linear relationship between validation loss and elapsed time demonstrates consistent GPU utilization and efficient batching. 
+### Optimizer -**Generalization gap** (right panel): Difference between validation and training loss oscillates around zero with peak divergence of 0.0754. This healthy pattern suggests the model learns general patterns rather than memorizing training data. +The C++ path uses AdamW-style updates with: -**Final metrics**: -- Validation loss: **1.6371** (iteration 3000) -- Training parameters: 0.83M params, 105 vocab tokens, 28.3M training / 3.1M validation tokens -- Architecture: `n_layer=4, n_embd=128` +- First moment estimate. +- Second moment estimate. +- Weight decay. +- Learning-rate-controlled parameter updates. + +## Reports and Results + +### Leaderboard + +| s.no | Time | Val BPB / Loss | Core | Description | Date | Contributors | +|---:|---:|---:|---:|---|---|---| +| 0 | 39.4 min | 1.3145 | 0.82M | Quadtrix CPU baseline, small data, fragmented output | 2026 | @Eamon2009 | +| 1 | 61.3 min | 0.7176 | 10.82M | Quadtrix Colab large-scale run, coherent paragraphs, strong convergence | 2026 | @Eamon2009 | +| 2 | 6.1 min | 0.9250 | 1.99M | Quadtrix T4 optimized run, fast training, stable learning, basic coherence | 2026 | @Eamon2009 | +| 3 | 76.2 min | 1.6371 | ~0.82M | Quadtrix.cpp extended CPU training, 3000 iterations | 2026 | @Eamon2009 | + +### Hardware Execution Backends + +| Device | Execution Path | +|---|---| +| CPU | Native C++ implementation and PyTorch CPU fallback | +| CUDA | PyTorch CUDA acceleration when available | +| iGPU | Experimental scripts for integrated-GPU style execution paths | + +### Training Metrics + +Final C++ run metrics documented in the project: + +| Metric | Value | +|---|---:| +| Validation loss | 1.6371 | +| Parameters | 826,985 | +| Vocabulary | 105 characters | +| Training tokens | ~28.3M | +| Validation tokens | ~3.1M | +| Training time | ~76.2 min | +| Architecture | 4 layers, 4 heads, 128 embedding width | + +### Python (PyTorch) -## Training Comparison & Scaling Analysis -## Python (Pytorch) ![Training 
Report](quadtrix_training_report.png) -## c++ -Screenshot 2026-04-24 182327 +### C++ -The following table compares three distinct training runs across different architectures and datasets, demonstrating empirical scaling law behavior: +C++ training screenshot -| Metric | **Run 1: Character-Level** | **Run 2: Small Scale** | **Run 3: Large Scale** | -|--------|---------------------------|------------------------|------------------------| -| **Architecture** | | | | +### Training Comparison + +| Metric | Character-Level | Small Scale | Large Scale | +|---|---:|---:|---:| | Parameters | 0.83M | 2.00M | 19.17M | | Layers | 4 | 4 | 4 | | Embedding Dim | 128 | 200 | 200 | | Attention Heads | 4 | 4 | 4 | | Context Length | 64 | 200 | 200 | -| **Dataset** | | | | -| Corpus | `tinystories` | `tinystories` | Children's Stories | -| Vocab Size | 105 (char) | 110 (char) | ~50K (BPE) | -| Training Tokens | 28.3M | 79.6M | Unknown | -| Validation Tokens | 3.1M | 8.8M | Unknown | -| Data Volume | - | 88.5 MB | - | -| **Training Config** | | | | +| Corpus | TinyStories | TinyStories | Children's Stories | +| Vocab Size | 105 char | 110 char | ~50K BPE | | Total Iterations | 3,000 | 5,000 | 5,000 | -| Hardware | CPU/CUDA | CUDA (Tesla T4) | Unknown | -| Wall-Clock Time | ~76 min | 5.97 min | Unknown | -| Throughput | - | ~838 iter/min | - | -| **Final Performance** | | | | -| **Train Loss** | 1.5632 | **0.9045** | Unknown | -| **Val Loss** | **1.6371** | **0.9301** | Unknown | +| Hardware | CPU/CUDA | CUDA T4 | Unknown | +| Final Train Loss | 1.5632 | 0.9045 | Unknown | +| Final Val Loss | 1.6371 | 0.9301 | Unknown | | Generalization Gap | 0.0739 | 0.0256 | Unknown | -| Peak Gap | 0.0754 @ iter 2800 | Unknown | Unknown | -| **Convergence** | | | | -| Initial Loss | 4.5 | 4.6946 | ~5.0 | -| Loss Reduction | 65.7% | 80.2% | ~80% | -| Saved Checkpoints | Every 200 iters | Every 200 iters | Multiple | -| Best Iteration | 3000 | 4999 | Unknown | -### Scaling Law Observations +### 
Comparison to Related Projects -**1. Parameter Count vs Performance** +| Project | Focus | Language | Autograd | +|---|---|---|---| +| nanoGPT | Minimal GPT training | Python | PyTorch | +| minGPT | Educational GPT | Python | PyTorch | +| llama2.c | Inference-oriented C implementation | C | None | +| Quadtrix.cpp | Training, inference, web UI, multi-backend experiments | C++/Python/TypeScript | Manual C++ + PyTorch | -The relationship between model size and loss follows the expected power law: +## Troubleshooting -``` -L(N) ∝ N^(-α) -``` +### PowerShell Blocks `npm` -Where `N` is parameter count and `α ≈ 0.076` based on our data: -- 0.83M params → Val Loss 1.6371 -- 2.00M params → Val Loss 0.9301 (43.2% reduction for 2.4× params) -- 19.17M params → Expected Val Loss ~0.65-0.75 (extrapolated) +Use: -**2. Data Efficiency** +```powershell +npm.cmd install +npm.cmd run dev +npm.cmd run build +``` -Token scaling shows diminishing returns: -- Run 1: 28.3M tokens @ 1.6371 loss -- Run 2: 79.6M tokens @ 0.9301 loss (2.8× data → 43% loss reduction) +### Python Backend Cannot Import Dependencies -This suggests we're in the data-limited regime where increasing model capacity yields better returns than increasing data alone. +Install dependencies into the repository virtual environment: -**3. Compute Efficiency** +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\backend +..\.venv\Scripts\python.exe -m pip install -r requirements.txt +``` -Run 2 achieved superior performance despite shorter wall-clock time (5.97 min vs 76 min), highlighting the importance of: -- Hardware acceleration (Tesla T4 CUDA) -- Larger batch processing -- Optimized data pipeline +### PyTorch Checkpoint Is Missing -**4. 
Generalization Dynamics** +Check the configured checkpoint path: -Both runs show healthy train/val convergence: -- Run 1: Final gap of 0.0739 (4.5% relative) -- Run 2: Final gap of 0.0256 (2.8% relative) +```powershell +Test-Path "C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\engine\best_model.pt" +``` -Smaller gap in Run 2 suggests better regularization or more diverse training data per parameter. +If it is missing, train first: -**5. Neural Scaling Law Projection** +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp +.\.venv\Scripts\python.exe engine\main.py +``` -Extrapolating from our empirical data: +### C++ Chat Cannot Find Weights -| Target Loss | Estimated Params | Estimated Tokens | Expected Compute | -|-------------|-----------------|------------------|------------------| -| 1.0 | ~1.5M | ~50M | ~2-3 min (T4) | -| 0.8 | ~3-4M | ~100M | ~8-12 min (T4) | -| 0.6 | ~15-20M | ~300M | ~40-60 min (T4) | -| 0.5 | ~40-50M | ~1B | ~3-5 hours (T4) | +The C++ chat and generation modes need `best_model.bin`. -**Chinchilla-optimal ratio**: For compute-efficient training at this scale, target N ≈ 20 × D (parameters ≈ 20 × training tokens in billions). +Train first: -1. **Scaling works**: Doubling parameters reduces loss by ~30-40% consistently -2. **Hardware matters**: GPU acceleration provides 12× speedup with better loss -3. **Small models saturate quickly**: Beyond 5K iterations, gains diminish without more capacity -4. **Character-level is competitive**: At small scale, character models perform reasonably despite simpler tokenization -5. 
**Generalization is healthy**: Both runs avoid severe overfitting, suggesting good regularization defaults +```powershell +.\quadtrix.exe data\input.txt +``` +Or point to an existing checkpoint: -## Comparison to Other Projects +```powershell +$env:GPT_MODEL_PATH="C:\path\to\best_model.bin" +.\quadtrix.exe data\input.txt --chat +``` -| Project | Focus | Language | Autograd | -|---------|-------|----------|----------| -| **nanoGPT** | Minimal PyTorch GPT | Python | PyTorch | -| **llama2.c** | Inference only | C | None | -| **minGPT** | Educational PyTorch | Python | PyTorch | -| **Quadtrix.cpp** | Training + inference, multi-backend | C++/Python/C | Manual + PyTorch | +### Frontend Cannot Reach Backend -## Building From Source +Start the FastAPI backend: -**Requirements**: -- C++17 compiler (GCC 7+, Clang 5+, MSVC 2017+) -- Python 3.8+ (for PyTorch paths) -- CMake 3.15+ (for LibTorch experiments) +```powershell +cd C:\Users\Admin\Documents\GitHub\Quadtrix.cpp\backend +..\.venv\Scripts\python.exe -m uvicorn main:app --host 127.0.0.1 --port 3001 +``` + +Then check: -**Minimal build** (C++ only): -```bash -g++ -std=c++17 -O2 -I. -Iinclude -o quadtrix main.cpp +```powershell +Invoke-RestMethod http://localhost:3001/api/health ``` -**With LibTorch**: -```bash -# Download libtorch from pytorch.org -cmake -S . -B build -DCMAKE_PREFIX_PATH=/path/to/libtorch -cmake --build build --config Release +### Port Already in Use + +Check common ports: + +```powershell +Get-NetTCPConnection -LocalPort 3001 +Get-NetTCPConnection -LocalPort 5173 +Get-NetTCPConnection -LocalPort 4173 +Get-NetTCPConnection -LocalPort 8080 ``` -**Python environment**: -```bash -python -m venv venv -source venv/bin/activate # Windows: venv\Scripts\activate -pip install torch tiktoken numpy +### Rebuild C++ After Changing Hyperparameters + +Any change in `config/config.h` requires recompilation: + +```powershell +g++ -std=c++17 -O2 -I. 
-Iinclude -o quadtrix.exe main.cpp ``` ---- -## Reference -- Architecture based on "Attention Is All You Need" (Vaswani et al., 2017) -- GPT-2 (Radford et al., 2019). +## References ---- +- Vaswani et al., "Attention Is All You Need", 2017. +- Radford et al., GPT-2 technical work, 2019. +- nanoGPT and minGPT as educational reference points. ## License MIT - -*Quadtrix.cpp · val loss 1.6371 · 0.83M params · 76 min on CPU* diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..b19496f --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,73 @@ +# Security Policy + +Quadtrix.cpp is a local-first transformer learning project. The repository includes native C++ code, Python inference and training code, a FastAPI backend, a React frontend, model checkpoints, and sample datasets. Security reports are welcome, especially when they affect local execution, backend API exposure, dependency safety, or handling of model/data files. + +## Supported Versions + +Security fixes are considered for: + +| Version | Supported | +| --- | --- | +| `master` / active development branch | Yes | +| Latest tagged release | Yes | +| Older releases | Best effort | + +If a fix affects both the C++ runtime and the Python or web paths, please call that out in the report so the patch can cover the whole stack. + +## Reporting a Vulnerability + +Please do not open a public issue for a vulnerability that could put users at risk. + +Preferred reporting path: + +1. Use GitHub's private vulnerability reporting or security advisory flow for this repository, if available. +2. Include enough detail to reproduce the issue locally. +3. Share the affected component: C++ runtime, Python engine, FastAPI backend, React frontend, packaging, model files, or documentation. + +Helpful details include: + +- Affected commit, branch, or release. +- Operating system and runtime versions. +- Exact command or request used to reproduce the issue. +- Expected behavior and actual behavior. 
+- Logs, stack traces, or crash output. +- Whether the issue requires a crafted model, dataset, prompt, HTTP request, or environment variable. + +You should receive an acknowledgement as soon as the report is reviewed. Fix timing depends on impact and complexity. + +## Security Scope + +In scope examples: + +- Memory safety bugs in the native C++ runtime. +- Crashes or denial-of-service issues caused by malformed input files, prompts, checkpoints, or HTTP requests. +- Backend API behavior that exposes local files, environment variables, model paths, prompts, sessions, or generated text unexpectedly. +- Unsafe dependency updates or dependency confusion risks in Python, npm, or GitHub Actions. +- Cross-site scripting, service worker, or PWA issues in the frontend. +- Secret leakage through logs, generated artifacts, bundled files, or example configuration. + +Out of scope examples: + +- Expected model hallucinations or low-quality generated text. +- Prompt injection against a local toy model without a concrete data exposure or code execution path. +- Reports that require already having arbitrary code execution on the user's machine. +- Vulnerabilities in third-party services or packages unless the repository uses them in an unsafe way. + +## Local Deployment Notes + +Quadtrix.cpp is intended to run locally during development. + +- Keep the FastAPI backend bound to `127.0.0.1` unless you intentionally expose it. +- Do not commit real `.env` files, tokens, API keys, private datasets, or private checkpoints. +- Treat downloaded model checkpoints and datasets as untrusted inputs. +- Rebuild native binaries from source when possible instead of trusting unknown executables. +- Review changes to service workers, PWA manifests, backend routing, and CORS settings carefully. + +## Dependency Updates + +The project uses C++, Python, npm, and GitHub Actions. When updating dependencies: + +- Prefer minimal version bumps tied to a clear reason. 
+- Run the relevant local checks from `contributing.md`. +- Watch for lockfile changes and generated build output. +- Note any security-related update in the pull request summary. diff --git a/backend/.env.example b/backend/.env.example new file mode 100644 index 0000000..86a59a5 --- /dev/null +++ b/backend/.env.example @@ -0,0 +1,9 @@ +API_PORT=3001 +CORS_ORIGINS=http://localhost:5173 +REDIS_URL= +LOG_LEVEL=INFO +MAX_SESSIONS=1000 +SESSION_TTL_HOURS=24 +CPP_SERVER_URL=http://localhost:8080 +TORCH_CHECKPOINT_PATH=../engine/best_model .pt +REQUEST_TIMEOUT_SECONDS=60 diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 0000000..ee3c735 --- /dev/null +++ b/backend/README.md @@ -0,0 +1,16 @@ +# Quadtrix Middleware + +FastAPI middleware for the Quadtrix.cpp C++ inference server. + +## Run + +```bash +pip install -r requirements.txt +uvicorn main:app --port 3001 --reload +``` + +Start the C++ server first: + +```bash +./Quadtrix data/input.txt --server --port 8080 +``` diff --git a/backend/config.py b/backend/config.py new file mode 100644 index 0000000..07ab4d9 --- /dev/null +++ b/backend/config.py @@ -0,0 +1,28 @@ +from functools import lru_cache +from typing import List + +from pydantic import Field +from pydantic_settings import BaseSettings, SettingsConfigDict + + +class Settings(BaseSettings): + api_port: int = Field(default=3001, alias="API_PORT") + cors_origins: str = Field(default="http://localhost:5173", alias="CORS_ORIGINS") + redis_url: str = Field(default="", alias="REDIS_URL") + log_level: str = Field(default="INFO", alias="LOG_LEVEL") + max_sessions: int = Field(default=1000, alias="MAX_SESSIONS") + session_ttl_hours: int = Field(default=24, alias="SESSION_TTL_HOURS") + cpp_server_url: str = Field(default="http://localhost:8080", alias="CPP_SERVER_URL") + torch_checkpoint_path: str = Field(default="../engine/best_model .pt", alias="TORCH_CHECKPOINT_PATH") + request_timeout_seconds: float = Field(default=60.0, 
alias="REQUEST_TIMEOUT_SECONDS") + + model_config = SettingsConfigDict(env_file=".env", extra="ignore", populate_by_name=True) + + @property + def cors_origin_list(self) -> List[str]: + return [origin.strip() for origin in self.cors_origins.split(",") if origin.strip()] + + +@lru_cache +def get_settings() -> Settings: + return Settings() diff --git a/backend/inference.py b/backend/inference.py new file mode 100644 index 0000000..6f26a0e --- /dev/null +++ b/backend/inference.py @@ -0,0 +1,131 @@ +import asyncio +import importlib.util +import sys +import time +from pathlib import Path +from types import ModuleType +from typing import Any, Dict, Optional + +import httpx + +from config import Settings +from models import CppGenerateResponse + + +class InferenceUnavailableError(Exception): + def __init__(self, message: str) -> None: + self.message = message + super().__init__(message) + + +class InferenceClient: + def __init__(self, settings: Settings) -> None: + self.settings = settings + self.torch_runner = TorchInferenceRunner(settings) + + async def generate(self, prompt: str, max_tokens: int, temperature: float, model_backend: str) -> CppGenerateResponse: + if model_backend == "torch": + return await self.torch_runner.generate(prompt=prompt, max_tokens=max_tokens, temperature=temperature) + return await self.generate_cpp(prompt=prompt, max_tokens=max_tokens) + + async def generate_cpp(self, prompt: str, max_tokens: int) -> CppGenerateResponse: + url = f"{self.settings.cpp_server_url.rstrip('/')}/generate" + try: + async with httpx.AsyncClient(timeout=self.settings.request_timeout_seconds) as client: + response = await client.post(url, json={"prompt": prompt, "max_tokens": max_tokens}) + response.raise_for_status() + except httpx.TimeoutException as exc: + raise InferenceUnavailableError("The C++ inference server timed out") from exc + except httpx.HTTPError as exc: + raise InferenceUnavailableError( + f"The C++ inference server is not reachable at 
{self.settings.cpp_server_url}" + ) from exc + + try: + return CppGenerateResponse.model_validate(response.json()) + except ValueError as exc: + raise InferenceUnavailableError("The C++ inference server returned invalid JSON") from exc + + async def health(self) -> Dict[str, Any]: + url = f"{self.settings.cpp_server_url.rstrip('/')}/health" + try: + async with httpx.AsyncClient(timeout=5.0) as client: + response = await client.get(url) + response.raise_for_status() + return response.json() + except httpx.HTTPError as exc: + raise InferenceUnavailableError( + f"The C++ inference server is not reachable at {self.settings.cpp_server_url}" + ) from exc + + def torch_health(self) -> bool: + return self.torch_runner.is_available() + + +class TorchInferenceRunner: + def __init__(self, settings: Settings) -> None: + self.settings = settings + self._module: Optional[ModuleType] = None + self._model: Optional[Any] = None + + def checkpoint_path(self) -> Path: + path = Path(self.settings.torch_checkpoint_path) + if path.is_absolute(): + return path + return (Path(__file__).resolve().parent / path).resolve() + + def engine_inference_path(self) -> Path: + return (Path(__file__).resolve().parents[1] / "engine" / "inference.py").resolve() + + def is_available(self) -> bool: + return self.checkpoint_path().exists() and self.engine_inference_path().exists() + + def _load_module(self) -> ModuleType: + if self._module is not None: + return self._module + module_path = self.engine_inference_path() + if not module_path.exists(): + raise InferenceUnavailableError(f"PyTorch inference file not found at {module_path}") + spec = importlib.util.spec_from_file_location("quadtrix_engine_inference", module_path) + if spec is None or spec.loader is None: + raise InferenceUnavailableError("Could not load engine/inference.py") + module = importlib.util.module_from_spec(spec) + sys.modules["quadtrix_engine_inference"] = module + spec.loader.exec_module(module) + self._module = module + return 
module + + def _load_model(self) -> Any: + if self._model is not None: + return self._model + checkpoint = self.checkpoint_path() + if not checkpoint.exists(): + raise InferenceUnavailableError(f"PyTorch checkpoint not found at {checkpoint}") + module = self._load_module() + self._model = module.load_model(checkpoint) + return self._model + + def _generate_sync(self, prompt: str, max_tokens: int, temperature: float) -> CppGenerateResponse: + started = time.monotonic() + module = self._load_module() + model = self._load_model() + text = module.generate_response( + model=model, + prompt=prompt, + max_new_tokens=max_tokens, + temperature=temperature, + top_k=None, + ) + seconds = round(time.monotonic() - started, 3) + return CppGenerateResponse(text=text, chars=len(text), seconds=seconds) + + async def generate(self, prompt: str, max_tokens: int, temperature: float) -> CppGenerateResponse: + try: + return await asyncio.wait_for( + asyncio.to_thread(self._generate_sync, prompt, max_tokens, temperature), + timeout=self.settings.request_timeout_seconds, + ) + except asyncio.TimeoutError as exc: + raise InferenceUnavailableError("The PyTorch model timed out") from exc + except (RuntimeError, FileNotFoundError, ImportError, AttributeError) as exc: + raise InferenceUnavailableError(f"The PyTorch model is unavailable: {exc}") from exc diff --git a/backend/main.py b/backend/main.py new file mode 100644 index 0000000..ee33d0d --- /dev/null +++ b/backend/main.py @@ -0,0 +1,46 @@ +import uvicorn +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware + +from config import get_settings +from inference import InferenceClient +from middleware.error_handler import register_error_handlers +from middleware.logging import RequestLoggingMiddleware, configure_logging +from router import chat, feedback, health, sessions +from session_store import build_session_store + + +settings = get_settings() +configure_logging(settings.log_level) + +app = 
FastAPI(title="Quadtrix API", version="1.0.0")
+app.state.session_store = build_session_store(
+    max_sessions=settings.max_sessions,
+    ttl_hours=settings.session_ttl_hours,
+    redis_url=settings.redis_url,
+)
+app.state.inference_client = InferenceClient(settings)
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=settings.cors_origin_list or ["http://localhost:5173"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+app.add_middleware(RequestLoggingMiddleware)
+register_error_handlers(app)
+
+app.include_router(health.router)
+app.include_router(sessions.router)
+app.include_router(chat.router)
+app.include_router(feedback.router)
+
+
+@app.get("/")
+async def root() -> dict[str, str]:
+    return {"status": "ok", "service": "quadtrix-api"}
+
+
+if __name__ == "__main__":
+    # Bind to loopback by default, per SECURITY.md ("Keep the FastAPI backend
+    # bound to 127.0.0.1 unless you intentionally expose it") and the README
+    # examples; pass an explicit host to uvicorn directly to expose it.
+    uvicorn.run("main:app", host="127.0.0.1", port=settings.api_port, reload=True)
diff --git a/backend/middleware/__init__.py b/backend/middleware/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/backend/middleware/__init__.py
@@ -0,0 +1 @@
+
diff --git a/backend/middleware/error_handler.py b/backend/middleware/error_handler.py
new file mode 100644
index 0000000..4d7cbe6
--- /dev/null
+++ b/backend/middleware/error_handler.py
@@ -0,0 +1,28 @@
+from fastapi import FastAPI, HTTPException, Request
+from fastapi.exceptions import RequestValidationError
+from fastapi.responses import JSONResponse
+
+
+def register_error_handlers(app: FastAPI) -> None:
+    @app.exception_handler(HTTPException)
+    async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
+        if isinstance(exc.detail, dict) and {"error", "message", "code"}.issubset(exc.detail.keys()):
+            return JSONResponse(status_code=exc.status_code, content=exc.detail)
+        return JSONResponse(
+            status_code=exc.status_code,
+            content={"error": "request_error", "message": str(exc.detail), "code": exc.status_code},
+        )
+
+    @app.exception_handler(RequestValidationError)
+    async 
def validation_exception_handler(request: Request, exc: RequestValidationError) -> JSONResponse: + return JSONResponse( + status_code=422, + content={"error": "validation_error", "message": "Request validation failed", "code": 422}, + ) + + @app.exception_handler(Exception) + async def unhandled_exception_handler(request: Request, exc: Exception) -> JSONResponse: + return JSONResponse( + status_code=500, + content={"error": "internal_error", "message": "Unexpected server error", "code": 500}, + ) diff --git a/backend/middleware/logging.py b/backend/middleware/logging.py new file mode 100644 index 0000000..8931721 --- /dev/null +++ b/backend/middleware/logging.py @@ -0,0 +1,45 @@ +import json +import logging +import time +from datetime import datetime, timezone +from typing import Callable + +from fastapi import Request, Response +from starlette.middleware.base import BaseHTTPMiddleware + + +class JsonFormatter(logging.Formatter): + def format(self, record: logging.LogRecord) -> str: + payload = { + "timestamp": datetime.now(timezone.utc).isoformat(), + "level": record.levelname, + "message": record.getMessage(), + } + for key in ("method", "path", "status_code", "latency_ms", "session_id", "prompt_length", "latency", "chars"): + value = getattr(record, key, None) + if value is not None: + payload[key] = value + return json.dumps(payload, separators=(",", ":")) + + +def configure_logging(level: str) -> None: + handler = logging.StreamHandler() + handler.setFormatter(JsonFormatter()) + logging.basicConfig(level=level.upper(), handlers=[handler], force=True) + + +class RequestLoggingMiddleware(BaseHTTPMiddleware): + async def dispatch(self, request: Request, call_next: Callable[[Request], Response]) -> Response: + started = time.monotonic() + response = await call_next(request) + latency_ms = round((time.monotonic() - started) * 1000, 2) + logging.getLogger("quadtrix.api").info( + "http_request", + extra={ + "method": request.method, + "path": request.url.path, + 
"status_code": response.status_code, + "latency_ms": latency_ms, + }, + ) + return response diff --git a/backend/models.py b/backend/models.py new file mode 100644 index 0000000..a8f466b --- /dev/null +++ b/backend/models.py @@ -0,0 +1,128 @@ +from datetime import datetime, timezone +from enum import Enum +from typing import Literal, Optional +from uuid import uuid4 + +from pydantic import BaseModel, ConfigDict, Field + + +def utc_now() -> datetime: + return datetime.now(timezone.utc) + + +def new_id(prefix: str) -> str: + return f"{prefix}-{uuid4()}" + + +class Role(str, Enum): + user = "user" + assistant = "assistant" + system = "system" + + +class ErrorResponse(BaseModel): + error: str + message: str + code: int + + +class ChatRequest(BaseModel): + session_id: Optional[str] = None + prompt: str = Field(min_length=1, max_length=500) + max_tokens: int = Field(default=200, ge=1, le=500) + temperature: float = Field(default=1.0, ge=0.1, le=2.0) + stream: bool = False + model_backend: Literal["cpp", "torch"] = "cpp" + + +class ChatResponse(BaseModel): + id: str + session_id: str + prompt: str + text: str + chars: int + seconds: float + model: str = "quadtrix-v1.0" + model_backend: Literal["cpp", "torch"] = "cpp" + created_at: datetime + + +class Message(BaseModel): + id: str = Field(default_factory=lambda: new_id("msg")) + session_id: str + role: Role + text: str + prompt: Optional[str] = None + chars: int = 0 + seconds: float = 0.0 + error: Optional[str] = None + created_at: datetime = Field(default_factory=utc_now) + + +class Session(BaseModel): + id: str = Field(default_factory=lambda: str(uuid4())) + title: str = "New conversation" + created_at: datetime = Field(default_factory=utc_now) + updated_at: datetime = Field(default_factory=utc_now) + message_count: int = 0 + + +class CreateSessionRequest(BaseModel): + title: Optional[str] = Field(default=None, max_length=80) + + +class AddMessageRequest(BaseModel): + role: Role + text: str = Field(min_length=1) + + 
+class FeedbackRequest(BaseModel): + session_id: str + message_id: str + rating: Literal["up", "down"] + comment: Optional[str] = Field(default=None, max_length=1000) + + +class FeedbackResponse(BaseModel): + ok: bool + id: str + created_at: datetime + + +class HealthResponse(BaseModel): + status: Literal["ok", "degraded"] + api: Literal["ok"] + cpp_server: Literal["ok", "unreachable"] + torch_model: Literal["ok", "unavailable"] + model: str = "quadtrix-v1.0" + vocab: int = 105 + params: int = 826985 + uptime_seconds: float + + +class StatsResponse(BaseModel): + model: str = "quadtrix-v1.0" + architecture: str = "4L x 4H x 200d" + parameters: int = 826985 + vocabulary: int = 105 + val_loss: float = 1.6371 + context: int = 128 + training: str = "76.2 min CPU" + backend: str + backend_online: bool + torch_checkpoint: str + torch_online: bool + uptime_seconds: float + + +class CppGenerateRequest(BaseModel): + prompt: str + max_tokens: int + + +class CppGenerateResponse(BaseModel): + text: str + chars: int + seconds: float + + model_config = ConfigDict(extra="ignore") diff --git a/backend/requirements.txt b/backend/requirements.txt new file mode 100644 index 0000000..cec57cc --- /dev/null +++ b/backend/requirements.txt @@ -0,0 +1,8 @@ +fastapi==0.115.6 +uvicorn[standard]==0.34.0 +pydantic==2.10.4 +pydantic-settings==2.7.1 +httpx==0.28.1 +redis==5.2.1 +torch +tiktoken diff --git a/backend/router/__init__.py b/backend/router/__init__.py new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/backend/router/__init__.py @@ -0,0 +1 @@ + diff --git a/backend/router/chat.py b/backend/router/chat.py new file mode 100644 index 0000000..538ee76 --- /dev/null +++ b/backend/router/chat.py @@ -0,0 +1,79 @@ +import logging +import time + +from fastapi import APIRouter, HTTPException, Request + +from inference import InferenceClient, InferenceUnavailableError +from models import ChatRequest, ChatResponse, Message, Role, new_id, utc_now +from session_store import SessionStore 
+ +router = APIRouter() +logger = logging.getLogger("quadtrix.api") + + +@router.post("/api/chat", response_model=ChatResponse) +async def chat(payload: ChatRequest, request: Request) -> ChatResponse: + started = time.monotonic() + store: SessionStore = request.app.state.session_store + client: InferenceClient = request.app.state.inference_client + title = payload.prompt[:40] + session = store.get_or_create_session(payload.session_id, title=title) + user_message = Message(session_id=session.id, role=Role.user, text=payload.prompt) + store.add_message(user_message) + + try: + generated = await client.generate( + prompt=payload.prompt, + max_tokens=payload.max_tokens, + temperature=payload.temperature, + model_backend=payload.model_backend, + ) + except InferenceUnavailableError as exc: + error_message = Message( + session_id=session.id, + role=Role.assistant, + text="Could not reach the selected model. Check the C++ server or engine checkpoint.", + error="model_unavailable", + ) + store.add_message(error_message) + raise HTTPException( + status_code=503, + detail={ + "error": "model_unavailable", + "message": exc.message, + "code": 503, + }, + ) from exc + + assistant_message = Message( + id=new_id("msg"), + session_id=session.id, + role=Role.assistant, + text=generated.text, + prompt=payload.prompt, + chars=generated.chars, + seconds=generated.seconds, + ) + store.add_message(assistant_message) + latency = round(time.monotonic() - started, 3) + logger.info( + "chat_request", + extra={ + "session_id": session.id, + "prompt_length": len(payload.prompt), + "latency": latency, + "chars": generated.chars, + "model_backend": payload.model_backend, + }, + ) + return ChatResponse( + id=assistant_message.id, + session_id=session.id, + prompt=payload.prompt, + text=generated.text, + chars=generated.chars, + seconds=generated.seconds, + model="quadtrix-v1.0-pt" if payload.model_backend == "torch" else "quadtrix-v1.0", + model_backend=payload.model_backend, + 
created_at=assistant_message.created_at, + ) diff --git a/backend/router/feedback.py b/backend/router/feedback.py new file mode 100644 index 0000000..94ee3ed --- /dev/null +++ b/backend/router/feedback.py @@ -0,0 +1,10 @@ +from fastapi import APIRouter + +from models import FeedbackRequest, FeedbackResponse, new_id, utc_now + +router = APIRouter() + + +@router.post("/api/feedback", response_model=FeedbackResponse) +async def feedback(payload: FeedbackRequest) -> FeedbackResponse: + return FeedbackResponse(ok=True, id=new_id("feedback"), created_at=utc_now()) diff --git a/backend/router/health.py b/backend/router/health.py new file mode 100644 index 0000000..928271a --- /dev/null +++ b/backend/router/health.py @@ -0,0 +1,56 @@ +import time + +from fastapi import APIRouter, Request + +from inference import InferenceClient, InferenceUnavailableError +from models import HealthResponse, StatsResponse + +router = APIRouter() +START_TIME = time.monotonic() + + +def uptime_seconds() -> float: + return round(time.monotonic() - START_TIME, 3) + + +@router.get("/api/health", response_model=HealthResponse) +async def health(request: Request) -> HealthResponse: + client: InferenceClient = request.app.state.inference_client + torch_status = "ok" if client.torch_health() else "unavailable" + try: + data = await client.health() + return HealthResponse( + status="ok", + api="ok", + cpp_server="ok", + torch_model=torch_status, + model=str(data.get("model", "quadtrix-v1.0")), + vocab=int(data.get("vocab", 105)), + params=int(data.get("params", 826985)), + uptime_seconds=uptime_seconds(), + ) + except InferenceUnavailableError: + return HealthResponse( + status="degraded", + api="ok", + cpp_server="unreachable", + torch_model=torch_status, + uptime_seconds=uptime_seconds(), + ) + + +@router.get("/api/stats", response_model=StatsResponse) +async def stats(request: Request) -> StatsResponse: + client: InferenceClient = request.app.state.inference_client + online = True + try: + await 
client.health() + except InferenceUnavailableError: + online = False + return StatsResponse( + backend=client.settings.cpp_server_url, + backend_online=online, + torch_checkpoint=str(client.torch_runner.checkpoint_path()), + torch_online=client.torch_health(), + uptime_seconds=uptime_seconds(), + ) diff --git a/backend/router/sessions.py b/backend/router/sessions.py new file mode 100644 index 0000000..c2b7a16 --- /dev/null +++ b/backend/router/sessions.py @@ -0,0 +1,44 @@ +from typing import List + +from fastapi import APIRouter, HTTPException, Request, status + +from models import AddMessageRequest, CreateSessionRequest, Message, Session +from session_store import SessionStore + +router = APIRouter() + + +def store_from_request(request: Request) -> SessionStore: + return request.app.state.session_store + + +@router.get("/api/sessions", response_model=List[Session]) +async def list_sessions(request: Request) -> List[Session]: + return store_from_request(request).list_sessions()[:50] + + +@router.post("/api/sessions", response_model=Session, status_code=status.HTTP_201_CREATED) +async def create_session(payload: CreateSessionRequest, request: Request) -> Session: + return store_from_request(request).create_session(title=payload.title) + + +@router.delete("/api/sessions/{session_id}", status_code=status.HTTP_204_NO_CONTENT) +async def delete_session(session_id: str, request: Request) -> None: + deleted = store_from_request(request).delete_session(session_id) + if not deleted: + raise HTTPException(status_code=404, detail="Session not found") + + +@router.get("/api/sessions/{session_id}/messages", response_model=List[Message]) +async def get_messages(session_id: str, request: Request) -> List[Message]: + store = store_from_request(request) + if store.get_session(session_id) is None: + raise HTTPException(status_code=404, detail="Session not found") + return store.get_messages(session_id) + + +@router.post("/api/sessions/{session_id}/messages", response_model=Message, 
status_code=status.HTTP_201_CREATED) +async def add_message(session_id: str, payload: AddMessageRequest, request: Request) -> Message: + store = store_from_request(request) + store.get_or_create_session(session_id) + return store.add_message(Message(session_id=session_id, role=payload.role, text=payload.text)) diff --git a/backend/server.py b/backend/server.py new file mode 100644 index 0000000..03835fe --- /dev/null +++ b/backend/server.py @@ -0,0 +1,517 @@ +""" +Quadtrix web backend. + +Run from the repository root: + python server.py + +Or directly: + python backend/server.py +""" + +from __future__ import annotations + +import codecs +import os +import queue +import re +import subprocess +import sys +import threading +import time +from pathlib import Path +from typing import Generator + +USER_SITE = ( + Path.home() + / "AppData" + / "Roaming" + / "Python" + / f"Python{sys.version_info.major}{sys.version_info.minor}" + / "site-packages" +) +if str(USER_SITE) not in sys.path: + sys.path.append(str(USER_SITE)) + +from flask import Flask, Response, jsonify, request, send_from_directory +from flask_cors import CORS + + +BACKEND_DIR = Path(__file__).resolve().parent +ROOT_DIR = BACKEND_DIR.parent +FRONTEND_DIR = ROOT_DIR / "frontend" + +EXE_PATH = Path(os.environ.get("QUADTRIX_EXE", ROOT_DIR / "Quadtrix.exe")).resolve() +DATA_PATH = Path(os.environ.get("QUADTRIX_DATA", ROOT_DIR / "data" / "input.txt")).resolve() +DEFAULT_MODEL_PATH = Path(os.environ.get("MODEL_PATH", ROOT_DIR / "best_model.bin")).resolve() + +MODEL_MARKER = "Quadtrix>" +PROMPT_MARKERS = ("\r\n\r\nYou>", "\n\nYou>", "\r\rYou>") +ANSI_RE = re.compile(r"\x1b\[[0-9;?]*[a-zA-Z]") +SAFE_MODEL_ROOTS = (ROOT_DIR, BACKEND_DIR) +MODEL_LOAD_TIMEOUT_SECONDS = 180 +GENERATION_IDLE_TIMEOUT_SECONDS = 180 + +app = Flask(__name__, static_folder=str(FRONTEND_DIR), static_url_path="") +CORS(app) + +procs: dict[str, subprocess.Popen] = {} +procs_lock = threading.Lock() + + +def _is_inside(path: Path, root: Path) -> 
bool: + try: + path.resolve().relative_to(root.resolve()) + return True + except ValueError: + return False + + +def _public_path(path: Path) -> str: + try: + return str(path.resolve().relative_to(ROOT_DIR.resolve())) + except ValueError: + return str(path) + + +def _discover_models() -> list[dict[str, str | bool]]: + """Find practical .bin model locations without walking the whole repo.""" + search_dirs = [ROOT_DIR, ROOT_DIR / "models", BACKEND_DIR / "models"] + seen: set[Path] = set() + models: list[dict[str, str | bool]] = [] + + for directory in search_dirs: + if not directory.exists() or not directory.is_dir(): + continue + for model in sorted(directory.glob("*.bin")): + resolved = model.resolve() + if resolved in seen: + continue + seen.add(resolved) + models.append( + { + "name": model.stem, + "path": _public_path(resolved), + "exists": resolved.exists(), + } + ) + + if DEFAULT_MODEL_PATH not in seen: + models.insert( + 0, + { + "name": DEFAULT_MODEL_PATH.stem or "best_model", + "path": _public_path(DEFAULT_MODEL_PATH), + "exists": DEFAULT_MODEL_PATH.exists(), + }, + ) + + return models + + +def _resolve_model(selection: str | None) -> tuple[Path | None, str | None]: + raw = (selection or "").strip() + candidate = Path(raw) if raw else DEFAULT_MODEL_PATH + + if not candidate.is_absolute(): + candidate = ROOT_DIR / candidate + + candidate = candidate.resolve() + + if candidate.suffix.lower() != ".bin": + return None, "selected model must be a .bin file" + + if not any(_is_inside(candidate, root) for root in SAFE_MODEL_ROOTS): + return None, "selected model must be inside this project" + + if not candidate.exists(): + return None, f"model not found: {candidate}" + + return candidate, None + + +def _drain_pipe(pipe) -> None: + try: + while True: + chunk = pipe.read(4096) + if not chunk: + break + except Exception: + pass + finally: + try: + pipe.close() + except Exception: + pass + + +def _split_ansi(text: str) -> tuple[str, str]: + out: list[str] = [] + i = 0 
+ n = len(text) + + while i < n: + char = text[i] + if char == "\x1b": + match = ANSI_RE.match(text, i) + if match: + i = match.end() + continue + return "".join(out), text[i:] + out.append(char) + i += 1 + + return "".join(out), "" + + +def _sse_char(char: str) -> str | None: + if char == "\n": + return "data: __NL__\n\n" + if char == "\r": + return None + return f"data: {char}\n\n" + + +def _sse_event(name: str, data: str) -> str: + return f"event: {name}\ndata: {data}\n\n" + + +def _ends_like_prompt_marker(text: str) -> bool: + tail = text[-8:] + return any(marker.startswith(tail) for marker in PROMPT_MARKERS if tail) + + +def stream_exe( + prompt: str, + max_tokens: int, + session_id: str, + model_path: Path, +) -> Generator[str, None, None]: + """Run Quadtrix.exe and stream only the model reply as SSE events.""" + env = os.environ.copy() + env["MODEL_PATH"] = str(model_path) + + cmd = [ + str(EXE_PATH), + "--chat", + "--chat-tokens", + str(max_tokens), + str(DATA_PATH), + ] + + try: + proc = subprocess.Popen( + cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env, + cwd=str(ROOT_DIR), + bufsize=0, + ) + except FileNotFoundError: + yield f"data: [ERROR] exe not found: {EXE_PATH}\n\n" + yield "data: [DONE]\n\n" + return + except Exception as exc: + yield f"data: [ERROR] {exc}\n\n" + yield "data: [DONE]\n\n" + return + + with procs_lock: + procs[session_id] = proc + + threading.Thread(target=_drain_pipe, args=(proc.stderr,), daemon=True).start() + + try: + assert proc.stdin is not None + proc.stdin.write((prompt + "\n").encode("utf-8")) + proc.stdin.flush() + except Exception as exc: + yield f"data: [ERROR] stdin write failed: {exc}\n\n" + try: + proc.kill() + except Exception: + pass + with procs_lock: + procs.pop(session_id, None) + yield "data: [DONE]\n\n" + return + + decoder = codecs.getincrementaldecoder("utf-8")(errors="replace") + pending = "" + pre_buffer = "" + output_hold = "" + started = False + first_emit = True 
+ stopped_by_prompt = False + start_time = time.monotonic() + last_byte_time = start_time + last_status_time = start_time + stdout_queue: queue.Queue[bytes | None] = queue.Queue() + + def read_stdout() -> None: + try: + assert proc.stdout is not None + while True: + chunk = proc.stdout.read(1) + if not chunk: + break + stdout_queue.put(chunk) + finally: + stdout_queue.put(None) + + threading.Thread(target=read_stdout, daemon=True).start() + yield _sse_event("status", "Starting Quadtrix and loading the model...") + + def drain(final: bool) -> Generator[str, None, None]: + nonlocal pending, pre_buffer, output_hold, started, first_emit, stopped_by_prompt + + clean, remainder = _split_ansi(pending) + pending = "" if final else remainder + + if not started: + pre_buffer += clean + marker_index = pre_buffer.find(MODEL_MARKER) + if marker_index == -1: + # Some builds do not print the exact marker, or print it very late. + # After a short wait, stream from the latest known prompt boundary + # instead of leaving the browser blank forever. 
+ if time.monotonic() - start_time > 5 and len(pre_buffer) > 0: + fallback_index = max(pre_buffer.rfind("\n"), pre_buffer.rfind("\r")) + clean = pre_buffer[fallback_index + 1 :] if fallback_index != -1 else pre_buffer + pre_buffer = "" + started = True + else: + if len(pre_buffer) > 16384: + pre_buffer = pre_buffer[-4096:] + return + else: + clean = pre_buffer[marker_index + len(MODEL_MARKER) :] + pre_buffer = "" + started = True + + def emit_text(text: str) -> Generator[str, None, None]: + nonlocal first_emit + for item in text: + if first_emit and item in (" ", "\t", "\n", "\r"): + continue + first_emit = False + event = _sse_char(item) + if event: + yield event + + max_marker_length = max(len(marker) for marker in PROMPT_MARKERS) + + for char in clean: + output_hold += char + + for marker in PROMPT_MARKERS: + marker_index = output_hold.find(marker) + if marker_index != -1: + yield from emit_text(output_hold[:marker_index].rstrip()) + output_hold = "" + stopped_by_prompt = True + return + + while len(output_hold) > max_marker_length - 1: + if _ends_like_prompt_marker(output_hold): + break + yield from emit_text(output_hold[0]) + output_hold = output_hold[1:] + + if final and output_hold: + yield from emit_text(output_hold) + output_hold = "" + + try: + while True: + try: + byte = stdout_queue.get(timeout=0.25) + except queue.Empty: + now = time.monotonic() + if not started and now - start_time > MODEL_LOAD_TIMEOUT_SECONDS: + yield "data: [ERROR] model did not become ready within 180 seconds\n\n" + try: + proc.kill() + except Exception: + pass + break + + if started and now - last_byte_time > GENERATION_IDLE_TIMEOUT_SECONDS: + yield "data: [ERROR] generation stalled for 180 seconds\n\n" + try: + proc.kill() + except Exception: + pass + break + + if now - last_status_time >= 3 and proc.poll() is None: + last_status_time = now + if not started: + yield _sse_event("status", "Still loading the model. 
First token will appear automatically...") + else: + yield _sse_event("status", "Generating tokens...") + continue + + if byte is None: + tail = decoder.decode(b"", final=True) + if tail: + pending += tail + yield from drain(final=True) + break + + last_byte_time = time.monotonic() + text = decoder.decode(byte) + if not text: + continue + + pending += text + yield from drain(final=False) + if stopped_by_prompt: + break + except GeneratorExit: + try: + proc.kill() + except Exception: + pass + with procs_lock: + procs.pop(session_id, None) + return + + try: + assert proc.stdin is not None + proc.stdin.write(b"quit\n") + proc.stdin.flush() + except Exception: + pass + + try: + proc.wait(timeout=3) + except subprocess.TimeoutExpired: + proc.kill() + + with procs_lock: + procs.pop(session_id, None) + + yield "data: [DONE]\n\n" + + +@app.route("/") +def index(): + return send_from_directory(FRONTEND_DIR, "index.html") + + +@app.route("/<path:path>") +def frontend_file(path: str): + return send_from_directory(FRONTEND_DIR, path) + + +@app.route("/models") +def models(): + return jsonify({"default": _public_path(DEFAULT_MODEL_PATH), "models": _discover_models()}) + + +@app.route("/status") +def status(): + selected_model, model_error = _resolve_model(request.args.get("model")) + model_path = selected_model or DEFAULT_MODEL_PATH + exe_ok = EXE_PATH.exists() + data_ok = DATA_PATH.exists() + model_ok = selected_model is not None + + error = None + if not exe_ok: + error = f"Quadtrix.exe not found: {EXE_PATH}" + elif not data_ok: + error = f"data input not found: {DATA_PATH}" + elif model_error: + error = model_error + + return jsonify( + { + "exe": exe_ok, + "data": data_ok, + "model": model_ok, + "ready": exe_ok and data_ok and model_ok, + "error": error, + "exe_path": str(EXE_PATH), + "data_path": str(DATA_PATH), + "model_path": str(model_path), + "selected_model": _public_path(model_path), + } + ) + + +@app.route("/health") +def health(): + return jsonify({"ok": True}) + + 
+@app.route("/generate") +def generate(): + prompt = request.args.get("prompt", "").strip() + session_id = request.args.get("sid", "default").strip() or "default" + + try: + max_tokens = int(request.args.get("max_tokens", 200)) + except ValueError: + return jsonify({"error": "max_tokens must be a number"}), 400 + + max_tokens = max(1, min(max_tokens, 2000)) + model_path, model_error = _resolve_model(request.args.get("model")) + + if not prompt: + return jsonify({"error": "empty prompt"}), 400 + if not EXE_PATH.exists(): + return jsonify({"error": f"exe not found: {EXE_PATH}"}), 500 + if not DATA_PATH.exists(): + return jsonify({"error": f"data input not found: {DATA_PATH}"}), 500 + if model_error or model_path is None: + return jsonify({"error": model_error or "model not found"}), 500 + + return Response( + stream_exe(prompt, max_tokens, session_id, model_path), + mimetype="text/event-stream", + headers={ + "Cache-Control": "no-cache, no-store", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", + }, + ) + + +@app.route("/stop", methods=["POST"]) +def stop(): + session_id = "default" + if request.is_json: + body = request.get_json(silent=True) or {} + session_id = str(body.get("sid") or "default") + + with procs_lock: + proc = procs.pop(session_id, None) + + if proc and proc.poll() is None: + proc.kill() + return jsonify({"status": "stopped"}) + return jsonify({"status": "idle"}) + + +def main() -> None: + print() + print("=" * 56) + print(" Quadtrix Web Interface") + print("=" * 56) + print(f" exe {EXE_PATH}") + print(f" {'found' if EXE_PATH.exists() else 'NOT FOUND'}") + print(f" data {DATA_PATH}") + print(f" {'found' if DATA_PATH.exists() else 'NOT FOUND'}") + print(f" model {DEFAULT_MODEL_PATH}") + print(f" {'found' if DEFAULT_MODEL_PATH.exists() else 'NOT FOUND'}") + print(" open http://localhost:5000") + print("=" * 56) + print() + app.run(host="0.0.0.0", port=5000, threaded=True, debug=False) + + +if __name__ == "__main__": + main() diff --git 
a/backend/session_store.py b/backend/session_store.py new file mode 100644 index 0000000..5d46973 --- /dev/null +++ b/backend/session_store.py @@ -0,0 +1,176 @@ +from collections import OrderedDict +from datetime import datetime, timedelta, timezone +from typing import Dict, List, Optional + +import redis + +from models import Message, Session, utc_now + + +class SessionStore: + def __init__(self, max_sessions: int, ttl_hours: int) -> None: + self.max_sessions = max_sessions + self.ttl = timedelta(hours=ttl_hours) + self.sessions: "OrderedDict[str, Session]" = OrderedDict() + self.messages: Dict[str, List[Message]] = {} + + def _is_expired(self, session: Session, now: datetime) -> bool: + return now - session.updated_at > self.ttl + + def prune(self) -> None: + now = utc_now() + expired = [session_id for session_id, session in self.sessions.items() if self._is_expired(session, now)] + for session_id in expired: + self.delete_session(session_id) + while len(self.sessions) > self.max_sessions: + session_id, _ = self.sessions.popitem(last=False) + self.messages.pop(session_id, None) + + def create_session(self, title: Optional[str] = None, session_id: Optional[str] = None) -> Session: + self.prune() + session = Session(title=title or "New conversation") + if session_id: + session.id = session_id + self.sessions[session.id] = session + self.messages[session.id] = [] + self.prune() + return session + + def get_or_create_session(self, session_id: Optional[str], title: Optional[str] = None) -> Session: + self.prune() + if session_id and session_id in self.sessions: + session = self.sessions[session_id] + self.sessions.move_to_end(session_id) + return session + return self.create_session(title=title, session_id=session_id) + + def list_sessions(self) -> List[Session]: + self.prune() + return list(reversed(self.sessions.values())) + + def get_session(self, session_id: str) -> Optional[Session]: + self.prune() + session = self.sessions.get(session_id) + if session: + 
self.sessions.move_to_end(session_id) + return session + + def delete_session(self, session_id: str) -> bool: + existed = session_id in self.sessions + self.sessions.pop(session_id, None) + self.messages.pop(session_id, None) + return existed + + def add_message(self, message: Message) -> Message: + session = self.get_or_create_session(message.session_id) + if session.title == "New conversation" and message.role.value == "user": + session.title = message.text[:40] + session.updated_at = utc_now() + items = self.messages.setdefault(session.id, []) + items.append(message) + session.message_count = len(items) + self.sessions[session.id] = session + self.sessions.move_to_end(session.id) + return message + + def get_messages(self, session_id: str) -> List[Message]: + self.prune() + return list(self.messages.get(session_id, [])) + + +class RedisSessionStore(SessionStore): + def __init__(self, max_sessions: int, ttl_hours: int, redis_url: str) -> None: + super().__init__(max_sessions=max_sessions, ttl_hours=ttl_hours) + self.redis_url = redis_url + self.client = redis.Redis.from_url(redis_url, decode_responses=True) + self.session_index = "quadtrix:sessions" + + def _ttl_seconds(self) -> int: + return int(self.ttl.total_seconds()) + + def _score(self, session: Session) -> float: + return session.updated_at.timestamp() + + def _session_key(self, session_id: str) -> str: + return f"quadtrix:session:{session_id}" + + def _messages_key(self, session_id: str) -> str: + return f"quadtrix:messages:{session_id}" + + def prune(self) -> None: + cutoff = (utc_now() - self.ttl).timestamp() + expired = self.client.zrangebyscore(self.session_index, "-inf", cutoff) + for session_id in expired: + self.delete_session(str(session_id)) + count = self.client.zcard(self.session_index) + if count > self.max_sessions: + overflow = int(count - self.max_sessions) + oldest = self.client.zrange(self.session_index, 0, overflow - 1) + for session_id in oldest: + self.delete_session(str(session_id)) + 
+ def create_session(self, title: Optional[str] = None, session_id: Optional[str] = None) -> Session: + self.prune() + session = Session(title=title or "New conversation") + if session_id: + session.id = session_id + self.client.setex(self._session_key(session.id), self._ttl_seconds(), session.model_dump_json()) + self.client.delete(self._messages_key(session.id)) + self.client.expire(self._messages_key(session.id), self._ttl_seconds()) + self.client.zadd(self.session_index, {session.id: self._score(session)}) + self.prune() + return session + + def get_or_create_session(self, session_id: Optional[str], title: Optional[str] = None) -> Session: + self.prune() + if session_id: + session = self.get_session(session_id) + if session: + return session + return self.create_session(title=title, session_id=session_id) + + def list_sessions(self) -> List[Session]: + self.prune() + session_ids = self.client.zrevrange(self.session_index, 0, self.max_sessions - 1) + sessions: List[Session] = [] + for session_id in session_ids: + session = self.get_session(str(session_id)) + if session: + sessions.append(session) + return sessions + + def get_session(self, session_id: str) -> Optional[Session]: + raw = self.client.get(self._session_key(session_id)) + if not raw: + self.client.zrem(self.session_index, session_id) + return None + session = Session.model_validate_json(str(raw)) + self.client.zadd(self.session_index, {session.id: self._score(session)}) + return session + + def delete_session(self, session_id: str) -> bool: + deleted = bool(self.client.delete(self._session_key(session_id), self._messages_key(session_id))) + self.client.zrem(self.session_index, session_id) + return deleted + + def add_message(self, message: Message) -> Message: + session = self.get_or_create_session(message.session_id) + if session.title == "New conversation" and message.role.value == "user": + session.title = message.text[:40] + session.updated_at = utc_now() + session.message_count = 
int(self.client.rpush(self._messages_key(session.id), message.model_dump_json())) + self.client.setex(self._session_key(session.id), self._ttl_seconds(), session.model_dump_json()) + self.client.expire(self._messages_key(session.id), self._ttl_seconds()) + self.client.zadd(self.session_index, {session.id: self._score(session)}) + return message + + def get_messages(self, session_id: str) -> List[Message]: + self.prune() + rows = self.client.lrange(self._messages_key(session_id), 0, -1) + return [Message.model_validate_json(str(row)) for row in rows] + + +def build_session_store(max_sessions: int, ttl_hours: int, redis_url: str) -> SessionStore: + if redis_url: + return RedisSessionStore(max_sessions=max_sessions, ttl_hours=ttl_hours, redis_url=redis_url) + return SessionStore(max_sessions=max_sessions, ttl_hours=ttl_hours) diff --git a/benchmark.cpp b/benchmark.cpp new file mode 100644 index 0000000..041c725 --- /dev/null +++ b/benchmark.cpp @@ -0,0 +1,296 @@ +// Run: +// .\quadtrix_bench.exe data\input.txt +// .\quadtrix_bench.exe data\input.txt --tokens 100 --runs 10 --warmup 3 +// +// Flags (all optional): +// --tokens N tokens to generate per run (default: 50) +// --runs N how many timed runs per prompt (default: 5) +// --warmup N un-timed warmup runs per prompt (default: 2) + +#include <algorithm> +#include <chrono> +#include <cmath> +#include <cstdlib> +#include <fstream> +#include <iomanip> +#include <iostream> +#include <numeric> +#include <string> +#include <vector> + +#include "config/config.h" +#include "include/dataloader.h" +#include "include/gpt.h" + +static bool file_exists(const std::string &p) +{ + std::ifstream f(p.c_str(), std::ios::binary); + return f.good(); +} + +static double now_ms() +{ + using namespace std::chrono; + return duration<double, std::milli>( + steady_clock::now().time_since_epoch()) + .count(); +} + +static double mean(const std::vector<double> &v) +{ + return std::accumulate(v.begin(), v.end(), 0.0) / v.size(); +} + +static double stdev(const std::vector<double> &v, double m) +{ + double sq = 0.0; + for (double x : v) + sq += (x - m) * (x - m); + return 
std::sqrt(sq / v.size()); +} + +static double timed_run(GPTLanguageModel &model, + DataLoader &dl, + const std::vector &prompt_ctx, + int n_tokens) +{ + std::vector ctx = prompt_ctx; + + double t0 = now_ms(); + for (int i = 0; i < n_tokens; ++i) + { + ctx = model.generate(ctx, 1); + if ((int)ctx.size() > BLOCK_SIZE) + ctx = std::vector(ctx.end() - BLOCK_SIZE, ctx.end()); + } + return now_ms() - t0; +} + +// + +static void section(const std::string &title) +{ + ; + std::cout << " " << title << "\n"; +} + +struct PromptResult +{ + std::string label; + int prompt_tokens; + int gen_tokens; + double avg_ms; + double min_ms; + double max_ms; + double std_ms; + double avg_tps; // tokens per second +}; + +static PromptResult bench_prompt(GPTLanguageModel &model, + DataLoader &dl, + const std::string &prompt, + int n_tokens, + int n_runs, + int n_warmup) +{ + // encode + std::vector ctx = dl.encode(prompt); + if (ctx.empty()) + ctx = {0}; + if ((int)ctx.size() > BLOCK_SIZE) + ctx = std::vector(ctx.end() - BLOCK_SIZE, ctx.end()); + + int prompt_len = (int)ctx.size(); + + // warmup (un-timed) + for (int i = 0; i < n_warmup; ++i) + timed_run(model, dl, ctx, n_tokens); + + // timed runs + std::vector times; + times.reserve(n_runs); + for (int i = 0; i < n_runs; ++i) + times.push_back(timed_run(model, dl, ctx, n_tokens)); + + double m = mean(times); + double sd = stdev(times, m); + double mn = *std::min_element(times.begin(), times.end()); + double mx = *std::max_element(times.begin(), times.end()); + double tps = n_tokens / (m / 1000.0); + + // truncate prompt for display + std::string label = prompt.size() > 30 + ? prompt.substr(0, 27) + "..." 
+ : prompt; + + return PromptResult{label, prompt_len, n_tokens, m, mn, mx, sd, tps}; +} + +static void print_table(const std::vector &results) +{ + section("RESULTS"); + + // header + std::cout << std::left + << std::setw(34) << "Prompt" + << std::right + << std::setw(8) << "P.Tok" + << std::setw(8) << "G.Tok" + << std::setw(10) << "Avg ms" + << std::setw(10) << "Min ms" + << std::setw(10) << "Max ms" + << std::setw(9) << "Std ms" + << std::setw(10) << "tok/s" + << "\n"; + std::cout << std::string(99, '-') << "\n"; + + std::cout << std::fixed; + for (const auto &r : results) + { + std::cout << std::left + << std::setw(34) << r.label + << std::right + << std::setw(8) << r.prompt_tokens + << std::setw(8) << r.gen_tokens + << std::setw(10) << std::setprecision(1) << r.avg_ms + << std::setw(10) << std::setprecision(1) << r.min_ms + << std::setw(10) << std::setprecision(1) << r.max_ms + << std::setw(9) << std::setprecision(1) << r.std_ms + << std::setw(10) << std::setprecision(2) << r.avg_tps + << "\n"; + } + + double total_avg_tps = 0.0; + double best_tps = 0.0; + for (const auto &r : results) + { + total_avg_tps += r.avg_tps; + best_tps = std::max(best_tps, r.avg_tps); + } + double overall_tps = total_avg_tps / results.size(); + + std::cout << "\n Overall avg throughput : " + << std::setprecision(2) << overall_tps << " tok/s\n"; + std::cout << " Peak throughput : " + << std::setprecision(2) << best_tps << " tok/s\n"; + std::cout << " ms per token (avg) : " + << std::setprecision(2) << 1000.0 / overall_tps << " ms\n"; +} + +static void save_csv(const std::vector &results, + const std::string &path) +{ + std::ofstream f(path); + if (!f) + { + std::cerr << "[WARN] Could not write CSV to " << path << "\n"; + return; + } + f << "prompt,prompt_tokens,gen_tokens,avg_ms,min_ms,max_ms,std_ms,tok_per_sec\n"; + for (const auto &r : results) + { + f << "\"" << r.label << "\"," + << r.prompt_tokens << "," + << r.gen_tokens << "," + << r.avg_ms << "," + << r.min_ms << "," + << 
r.max_ms << "," + << r.std_ms << "," + << r.avg_tps << "\n"; + } + std::cout << "\n CSV saved to: " << path << "\n"; +} + +int main(int argc, char *argv[]) +{ + + std::string data_path = DEFAULT_CLEANED_PATH; + std::string model_path = BEST_MODEL_PATH; + int n_tokens = 50; + int n_runs = 5; + int n_warmup = 2; + + for (int i = 1; i < argc; ++i) + { + std::string a = argv[i]; + if (a == "--tokens" && i + 1 < argc) + n_tokens = std::atoi(argv[++i]); + else if (a == "--runs" && i + 1 < argc) + n_runs = std::atoi(argv[++i]); + else if (a == "--warmup" && i + 1 < argc) + n_warmup = std::atoi(argv[++i]); + else + data_path = a; + } + + std::cout << " Quadtrix Inference Benchmark\n"; + std::cout << " data : " << data_path << "\n"; + std::cout << " model : " << model_path << "\n"; + std::cout << " tokens : " << n_tokens << " per run\n"; + std::cout << " runs : " << n_runs << " timed + " + << n_warmup << " warmup\n"; + + DataLoader dl; + try + { + dl.load(data_path); + } + catch (const std::exception &e) + { + std::cerr << "[ERROR] " << e.what() << "\n"; + return 1; + } + + if (!file_exists(model_path)) + { + std::cerr << "[ERROR] Weights not found at " << model_path << "\n"; + std::cerr << "[HINT] Train first, or set " << MODEL_PATH_ENV_VAR << "\n"; + return 1; + } + + GPTLanguageModel model(dl.vocab_size, N_EMBD, N_HEAD, N_LAYER, BLOCK_SIZE, SEED); + model.load(model_path); + + std::cout << "\n[OK] Model loaded (" << model.num_params() / 1.0e6f + << " M params)\n"; + + std::vector prompts = { + "", + "The", // 1-token prompt + "Once upon a time", // short prompt + "The quick brown fox jumps", // medium prompt + std::string(1, 'a'), // long prompt (stress-tests context window) + }; + + section("RUNNING"); + std::vector results; + results.reserve(prompts.size()); + + for (size_t i = 0; i < prompts.size(); ++i) + { + std::string display = prompts[i].empty() + ? "(empty / BOS)" + : (prompts[i].size() > 30 + ? prompts[i].substr(0, 27) + "..." 
+ : prompts[i]); + + std::cout << " [" << (i + 1) << "/" << prompts.size() << "] \"" + << display << "\" ... " << std::flush; + + PromptResult r = bench_prompt(model, dl, + prompts[i], + n_tokens, n_runs, n_warmup); + results.push_back(r); + + std::cout << std::fixed << std::setprecision(2) + << r.avg_tps << " tok/s\n"; + } + + print_table(results); + save_csv(results, "benchmark_results.csv"); + + std::cout << "\n"; + + std::cout << " Done.\n"; + return 0; +} \ No newline at end of file diff --git a/benchmark_results.csv b/benchmark_results.csv new file mode 100644 index 0000000..7496a05 --- /dev/null +++ b/benchmark_results.csv @@ -0,0 +1,6 @@ +prompt,prompt_tokens,gen_tokens,avg_ms,min_ms,max_ms,std_ms,tok_per_sec +"",1,50,2027,1848.4,2314.78,168.952,24.667 +"The",3,50,2561.63,2241.3,2986.35,306.631,19.5188 +"Once upon a time",16,50,3038.74,2885.22,3225.42,126.5,16.4542 +"The quick brown fox jumps",25,50,3975,3561.3,4783.34,454.592,12.5786 +"a",1,50,1862.99,1808.71,1929.64,46.4738,26.8386 diff --git a/bin/quadtrix.js b/bin/quadtrix.js new file mode 100644 index 0000000..1dd4287 --- /dev/null +++ b/bin/quadtrix.js @@ -0,0 +1,246 @@ +#!/usr/bin/env node + +const { createServer } = require("node:http"); +const { createReadStream, existsSync, mkdirSync, statSync } = require("node:fs"); +const { extname, join, resolve } = require("node:path"); +const { spawn } = require("node:child_process"); +const { platform } = require("node:os"); + +const packageRoot = resolve(__dirname, ".."); +const userRoot = process.cwd(); +const isWindows = platform() === "win32"; +const python = isWindows ? "python" : "python3"; +const cppBinary = join(userRoot, ".quadtrix", "bin", isWindows ? 
"quadtrix.exe" : "quadtrix"); + +const mimeTypes = { + ".css": "text/css", + ".html": "text/html", + ".ico": "image/x-icon", + ".js": "text/javascript", + ".json": "application/json", + ".png": "image/png", + ".svg": "image/svg+xml", + ".webmanifest": "application/manifest+json", +}; + +function usage() { + console.log(` +Quadtrix CLI + +Usage: + quadtrix chat [--api-port 3001] [--web-port 5173] [--no-open] + quadtrix train --backend cpp [--data data/input.txt] + quadtrix train --backend python + quadtrix setup + +Commands: + chat Start the FastAPI backend, serve the built frontend, and open chat. + train Train locally with either the C++ or Python implementation. + setup Install Python backend/engine dependencies with pip. +`); +} + +function argValue(args, name, fallback) { + const index = args.indexOf(name); + if (index === -1 || index + 1 >= args.length) { + return fallback; + } + return args[index + 1]; +} + +function hasArg(args, name) { + return args.includes(name); +} + +function run(command, args, options = {}) { + const child = spawn(command, args, { + cwd: options.cwd || userRoot, + env: { ...process.env, ...(options.env || {}) }, + stdio: options.stdio || "inherit", + shell: false, + }); + + child.on("error", (error) => { + console.error(`Failed to start ${command}: ${error.message}`); + }); + + return child; +} + +function openBrowser(url) { + if (isWindows) { + spawn("cmd", ["/c", "start", "", url], { detached: true, stdio: "ignore" }).unref(); + return; + } + + if (platform() === "darwin") { + spawn("open", [url], { detached: true, stdio: "ignore" }).unref(); + return; + } + + spawn("xdg-open", [url], { detached: true, stdio: "ignore" }).unref(); +} + +function serveStatic(directory, port) { + if (!existsSync(directory)) { + console.error(`Frontend build not found: ${directory}`); + console.error("Run `npm run build:frontend` before packing or publishing."); + process.exit(1); + } + + const server = createServer((request, response) => { + const 
rawPath = decodeURIComponent((request.url || "/").split("?")[0]); + const safePath = rawPath.replace(/^\/+/, ""); + let filePath = resolve(directory, safePath || "index.html"); + + if (!filePath.startsWith(resolve(directory))) { + response.writeHead(403); + response.end("Forbidden"); + return; + } + + if (!existsSync(filePath) || statSync(filePath).isDirectory()) { + filePath = join(directory, "index.html"); + } + + response.writeHead(200, { + "Content-Type": mimeTypes[extname(filePath)] || "application/octet-stream", + }); + createReadStream(filePath).pipe(response); + }); + + server.listen(port, () => { + console.log(`Frontend: http://localhost:${port}`); + }); + + return server; +} + +function startChat(args) { + const apiPort = argValue(args, "--api-port", "3001"); + const webPort = argValue(args, "--web-port", "5173"); + const frontendDist = join(packageRoot, "frontend", "dist"); + const backendDir = join(packageRoot, "backend"); + const url = `http://localhost:${webPort}`; + + const api = run(python, ["-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", apiPort], { + cwd: backendDir, + env: { + API_PORT: apiPort, + CORS_ORIGINS: url, + }, + }); + const web = serveStatic(frontendDist, Number(webPort)); + + console.log("Starting Quadtrix chat..."); + console.log(`Backend: http://localhost:${apiPort}`); + + if (!hasArg(args, "--no-open")) { + setTimeout(() => openBrowser(url), 1200); + } + + const stop = () => { + web.close(); + api.kill(); + process.exit(0); + }; + + process.on("SIGINT", stop); + process.on("SIGTERM", stop); +} + +function setup() { + const requirements = join(packageRoot, "backend", "requirements.txt"); + const child = run(python, ["-m", "pip", "install", "-r", requirements]); + child.on("exit", (code) => process.exit(code || 0)); +} + +function compileCpp() { + mkdirSync(join(userRoot, ".quadtrix", "bin"), { recursive: true }); + const child = run("g++", [ + "-std=c++17", + "-O2", + "-I.", + "-Iinclude", + "-o", + cppBinary, + 
"main.cpp", + ], { cwd: packageRoot }); + + return new Promise((resolvePromise) => { + child.on("exit", (code) => resolvePromise(code || 0)); + }); +} + +function resolveTrainingData(args) { + const requested = argValue(args, "--data", join(userRoot, "data", "input.txt")); + const data = resolve(userRoot, requested); + + if (!existsSync(data)) { + console.error(`Training data not found: ${data}`); + console.error("Pass a text file with `--data ./path/to/input.txt`."); + process.exit(1); + } + + return data; +} + +async function train(args) { + const backend = argValue(args, "--backend", "cpp"); + + if (backend === "cpp") { + const data = resolveTrainingData(args); + const code = await compileCpp(); + if (code !== 0) { + process.exit(code); + } + run(cppBinary, [data]).on("exit", (exitCode) => process.exit(exitCode || 0)); + return; + } + + if (backend === "python" || backend === "py") { + const data = resolveTrainingData(args); + const script = join(packageRoot, "engine", "main.py"); + run(python, [script], { + cwd: join(packageRoot, "engine"), + env: { QUADTRIX_TRAIN_DATA: data }, + }).on("exit", (exitCode) => { + process.exit(exitCode || 0); + }); + return; + } + + console.error(`Unknown backend: ${backend}`); + console.error("Use `--backend cpp` or `--backend python`."); + process.exit(1); +} + +async function main() { + const [command, ...args] = process.argv.slice(2); + + if (!command || command === "--help" || command === "-h") { + usage(); + return; + } + + if (command === "chat") { + startChat(args); + return; + } + + if (command === "setup") { + setup(); + return; + } + + if (command === "train") { + await train(args); + return; + } + + console.error(`Unknown command: ${command}`); + usage(); + process.exit(1); +} + +main(); diff --git a/contributing.md b/contributing.md new file mode 100644 index 0000000..3705488 --- /dev/null +++ b/contributing.md @@ -0,0 +1,162 @@ +# Contributing to Quadtrix.cpp + +Thanks for helping improve Quadtrix.cpp. 
This project is a transformer learning lab with several execution paths: native C++ training and inference, PyTorch experiments, a FastAPI backend, and a React + TypeScript chat UI. Contributions are easiest to review when they keep those paths clear and testable. + +## Good First Contributions + +Useful contributions include: + +- Fixing correctness bugs in the C++ transformer implementation. +- Improving training, inference, checkpoint loading, or export scripts. +- Making the FastAPI backend more reliable and easier to run locally. +- Improving the React chat UI without hiding the model/backend behavior. +- Adding focused documentation for setup, model files, datasets, or run commands. +- Tightening CI, dependency versions, packaging, or release steps. + +For larger model architecture changes, open an issue first so the design can be discussed before a big patch lands. + +## Repository Layout + +```text +Quadtrix.cpp/ + main.cpp Native C++ entry point + include/ C++ headers + src/ C++ source files + engine/ PyTorch training, inference, export, and model files + backend/ FastAPI backend and session handling + frontend/ React + TypeScript chat UI + iGPU/ Integrated GPU experiments + config/ Runtime configuration + data/ Local datasets and helpers + .github/workflows/ CI and release workflows +``` + +## Development Setup + +From the repository root: + +```powershell +python -m venv .venv +.\.venv\Scripts\python.exe -m pip install --upgrade pip +cd backend +..\.venv\Scripts\python.exe -m pip install -r requirements.txt +``` + +Install frontend dependencies: + +```powershell +cd frontend +npm.cmd install +``` + +Build the native C++ runtime from the repository root: + +```bash +g++ -std=c++17 -O2 -I. 
-Iinclude -o quadtrix main.cpp +``` + +## Running Locally + +Run the C++ model: + +```powershell +.\Quadtrix.exe data\input.txt --chat +``` + +Run the C++ HTTP server: + +```powershell +.\Quadtrix.exe data\input.txt --server --port 8080 +``` + +Run the FastAPI backend: + +```powershell +cd backend +..\.venv\Scripts\python.exe -m uvicorn main:app --host 127.0.0.1 --port 3001 +``` + +Run the frontend: + +```powershell +cd frontend +npm.cmd run dev +``` + +Open the app at: + +```text +http://localhost:5173 +``` + +## Checks Before Opening a Pull Request + +Run the checks that match your change. + +C++: + +```bash +g++ -std=c++17 -O2 -I. -Iinclude -o quadtrix main.cpp +``` + +Python: + +```powershell +.\.venv\Scripts\python.exe -m compileall backend engine iGPU +cd backend +..\.venv\Scripts\python.exe -c "from main import app; print(app.title)" +``` + +Frontend: + +```powershell +cd frontend +npm.cmd run build +``` + +If you cannot run a relevant check, mention that in the pull request and explain why. + +## Pull Request Guidelines + +- Keep changes focused on one problem or feature. +- Use the existing style of the file you are editing. +- Avoid committing generated artifacts unless the project already expects them. +- Do not commit `.env` files, secrets, private datasets, or personal checkpoints. +- Update `README.md`, `run.md`, or related docs when commands or behavior change. +- Include screenshots or short notes for UI changes. +- Mention any change that affects model files, ports, CORS, service workers, or packaging. + +The pull request template asks for: + +- Summary and user-facing impact. +- C++ build status. +- Backend smoke-test status. +- Frontend build status. +- Documentation or screenshot updates when needed. + +## Coding Notes + +For C++ changes: + +- Prefer clear, debuggable code over clever abstractions. +- Keep the educational value of the implementation visible. +- Be careful with tensor shapes, bounds, and ownership. 
+- Add comments only where the math or control flow is not obvious. + +For Python changes: + +- Keep backend behavior explicit and local-development friendly. +- Avoid broad exception swallowing around model loading or inference. +- Treat model paths, datasets, and request payloads as untrusted inputs. + +For frontend changes: + +- Keep the chat UI practical and responsive. +- Preserve the ability to switch between the C++ backend and the `.pt` path. +- Make loading, error, and disconnected states clear. + +## Documentation Style + +Use concrete commands and paths. Quadtrix.cpp has multiple runtime paths, so say exactly which path a command belongs to: C++, PyTorch, FastAPI backend, frontend, iGPU, or packaging. + +When documenting training results, include the hardware, dataset, iteration count, elapsed time, and validation metric so results can be compared fairly. diff --git a/docs/Quadtrix_Educational_Paper.pdf b/docs/Quadtrix_Educational_Paper.pdf new file mode 100644 index 0000000..d8d2a3a Binary files /dev/null and b/docs/Quadtrix_Educational_Paper.pdf differ diff --git a/docs/run_20260430_192930.png b/docs/run_20260430_192930.png new file mode 100644 index 0000000..8548392 Binary files /dev/null and b/docs/run_20260430_192930.png differ diff --git a/docs/run_20260508_110726.png b/docs/run_20260508_110726.png new file mode 100644 index 0000000..5848440 Binary files /dev/null and b/docs/run_20260508_110726.png differ diff --git a/engine/best_model.pt b/engine/best_model.pt deleted file mode 100644 index 1595ea5..0000000 Binary files a/engine/best_model.pt and /dev/null differ diff --git a/engine/fine-tune/chat.py b/engine/fine-tune/chat.py new file mode 100644 index 0000000..ce21f3c --- /dev/null +++ b/engine/fine-tune/chat.py @@ -0,0 +1,270 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F +import tiktoken +DEFAULT_CONFIG = { + 'n_embd': 64, + 'n_head': 4, + 'n_layer': 4, + 'block_size': 32, + 'dropout': 0.0, +} + +device = 'cuda' 
if torch.cuda.is_available() else 'cpu' +tokenizer = tiktoken.get_encoding("gpt2") +vocab_size = tokenizer.n_vocab + +class Head(nn.Module): + def __init__(self, head_size, block_size, dropout): + super().__init__() + self.key = nn.Linear(DEFAULT_CONFIG['n_embd'], head_size, bias=False) + self.query = nn.Linear(DEFAULT_CONFIG['n_embd'], head_size, bias=False) + self.value = nn.Linear(DEFAULT_CONFIG['n_embd'], head_size, bias=False) + self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size))) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + _, T, _ = x.shape + k = self.key(x) + q = self.query(x) + wei = q @ k.transpose(-2, -1) * k.shape[-1] ** -0.5 + wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) + wei = F.softmax(wei, dim=-1) + wei = self.dropout(wei) + return wei @ self.value(x) + + +class MultiHeadAttention(nn.Module): + def __init__(self, num_heads, head_size, block_size, dropout): + super().__init__() + n_embd = DEFAULT_CONFIG['n_embd'] + self.heads = nn.ModuleList([Head(head_size, block_size, dropout) for _ in range(num_heads)]) + self.proj = nn.Linear(head_size * num_heads, n_embd) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + out = torch.cat([h(x) for h in self.heads], dim=-1) + return self.dropout(self.proj(out)) + + +class FeedForward(nn.Module): + def __init__(self, n_embd, dropout): + super().__init__() + self.net = nn.Sequential( + nn.Linear(n_embd, 4 * n_embd), + nn.ReLU(), + nn.Linear(4 * n_embd, n_embd), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +class Block(nn.Module): + def __init__(self, n_embd, n_head, block_size, dropout): + super().__init__() + head_size = n_embd // n_head + self.sa = MultiHeadAttention(n_head, head_size, block_size, dropout) + self.ffwd = FeedForward(n_embd, dropout) + self.ln1 = nn.LayerNorm(n_embd) + self.ln2 = nn.LayerNorm(n_embd) + + def forward(self, x): + x = x + self.sa(self.ln1(x)) + x = x + self.ffwd(self.ln2(x)) + 
return x + + +class GPTLanguageModel(nn.Module): + def __init__(self, cfg): + super().__init__() + n_embd = cfg['n_embd'] + n_head = cfg['n_head'] + n_layer = cfg['n_layer'] + block_size = cfg['block_size'] + dropout = cfg['dropout'] + + self.block_size = block_size + self.token_embedding_table = nn.Embedding(vocab_size, n_embd) + self.position_embedding_table = nn.Embedding(block_size, n_embd) + self.blocks = nn.Sequential( + *[Block(n_embd, n_head, block_size, dropout) for _ in range(n_layer)] + ) + self.ln_f = nn.LayerNorm(n_embd) + self.lm_head = nn.Linear(n_embd, vocab_size) + + def forward(self, idx, targets=None): + B, T = idx.shape + tok_emb = self.token_embedding_table(idx) + pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device)) + x = tok_emb + pos_emb + x = self.blocks(x) + x = self.ln_f(x) + logits = self.lm_head(x) + loss = None + if targets is not None: + B, T, C = logits.shape + loss = F.cross_entropy(logits.view(B * T, C), targets.view(B * T)) + return logits, loss + + @torch.no_grad() + def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None, top_p=None): + for _ in range(max_new_tokens): + idx_cond = idx[:, -self.block_size:] + logits, _ = self(idx_cond) + logits = logits[:, -1, :] / temperature # (B, vocab_size) + + # Top-k filtering + if top_k is not None: + v, _ = torch.topk(logits, min(top_k, logits.size(-1))) + logits[logits < v[:, [-1]]] = float('-inf') + + # Top-p (nucleus) filtering + if top_p is not None: + sorted_logits, sorted_idx = torch.sort(logits, descending=True) + cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + remove = cum_probs - F.softmax(sorted_logits, dim=-1) > top_p + sorted_logits[remove] = float('-inf') + logits = torch.zeros_like(logits).scatter(1, sorted_idx, sorted_logits) + + probs = F.softmax(logits, dim=-1) + idx_next = torch.multinomial(probs, num_samples=1) + idx = torch.cat((idx, idx_next), dim=1) + return idx +def load_model(pt_path: str) -> GPTLanguageModel: 
+ print(f"Loading checkpoint: {pt_path}") + checkpoint = torch.load(pt_path, map_location=device) + + if isinstance(checkpoint, dict): + cfg = checkpoint.get('config', DEFAULT_CONFIG) + state_dict = checkpoint.get('model', checkpoint) + else: + # Raw state dict saved directly + cfg = DEFAULT_CONFIG + state_dict = checkpoint + + # Merge missing keys with defaults + for k, v in DEFAULT_CONFIG.items(): + cfg.setdefault(k, v) + cfg['dropout'] = 0.0 # always off at inference + cfg['vocab_size'] = vocab_size + + # Update module-level config so layers build correctly + DEFAULT_CONFIG.update(cfg) + + model = GPTLanguageModel(cfg).to(device) + model.load_state_dict(state_dict, strict=False) + model.eval() + + total_params = sum(p.numel() for p in model.parameters()) + print(f"Model loaded | params: {total_params:,} | device: {device}") + print(f" block_size={cfg['block_size']} n_embd={cfg['n_embd']} " + f"n_head={cfg['n_head']} n_layer={cfg['n_layer']}") + return model +def generate_reply(model, prompt: str, max_new_tokens=200, + temperature=0.8, top_k=50, top_p=0.95) -> str: + tokens = tokenizer.encode(prompt) + idx = torch.tensor([tokens], dtype=torch.long, device=device) + out = model.generate(idx, max_new_tokens=max_new_tokens, + temperature=temperature, top_k=top_k, top_p=top_p) + # Return only the newly generated part + new_tokens = out[0][len(tokens):].tolist() + return tokenizer.decode(new_tokens) + +def chat(model): + print("\n" + "═" * 60) + print(" Quadtrix Chat — type 'quit' or 'exit' to stop") + print(" Commands: /temp <0-2> /tokens /topk /topp <0-1> /reset") + print("═" * 60 + "\n") + + # Mutable settings + settings = { + 'temperature': 0.8, + 'max_new_tokens': 200, + 'top_k': 50, + 'top_p': 0.95, + 'context_window': True, # keep rolling context + } + history = "" # rolling conversation context + + while True: + try: + user_input = input("You: ").strip() + except (EOFError, KeyboardInterrupt): + print("\nGoodbye!") + break + + if not user_input: + continue + + 
if user_input.lower() in ('quit', 'exit'): + print("Goodbye!") + break + if user_input.startswith('/'): + parts = user_input.split() + cmd = parts[0].lower() + try: + if cmd == '/temp' and len(parts) == 2: + settings['temperature'] = float(parts[1]) + print(f" temperature set to {settings['temperature']}") + elif cmd == '/tokens' and len(parts) == 2: + settings['max_new_tokens'] = int(parts[1]) + print(f" max_new_tokens set to {settings['max_new_tokens']}") + elif cmd == '/topk' and len(parts) == 2: + settings['top_k'] = int(parts[1]) + print(f" top_k set to {settings['top_k']}") + elif cmd == '/topp' and len(parts) == 2: + settings['top_p'] = float(parts[1]) + print(f" top_p set to {settings['top_p']}") + elif cmd == '/reset': + history = "" + print(" conversation history cleared") + elif cmd == '/settings': + print(f" {settings}") + else: + print(f" Unknown command: {cmd}") + except ValueError: + print(" Invalid value") + continue + history += user_input + "\n" + prompt = history + reply = generate_reply( + model, prompt, + max_new_tokens = settings['max_new_tokens'], + temperature = settings['temperature'], + top_k = settings['top_k'], + top_p = settings['top_p'], + ) + + print(f"\nModel: {reply.strip()}\n") + history += reply + "\n" + tokens = tokenizer.encode(history) + if len(tokens) > model.block_size - 50: + history = tokenizer.decode(tokens[-(model.block_size - 50):]) + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='Chat with model') + parser.add_argument('--model', type=str, default='finetuned_model.pt', + help='Path to the .pt checkpoint file (default: finetuned_model.pt)') + parser.add_argument('--max-tokens', type=int, default=200, help='Max new tokens per reply') + parser.add_argument('--temperature', type=float, default=0.8, help='Sampling temperature (0.1–2.0)') + parser.add_argument('--top-k', type=int, default=50, help='Top-k sampling (0 = disabled)') + parser.add_argument('--top-p', type=float, 
default=0.95, help='Top-p nucleus sampling') + parser.add_argument('--prompt', type=str, default=None, help='Single prompt (non-interactive)') + args = parser.parse_args() + + model = load_model(args.model) + + if args.prompt: + # One-shot mode + reply = generate_reply(model, args.prompt, + max_new_tokens=args.max_tokens, + temperature=args.temperature, + top_k=args.top_k if args.top_k > 0 else None, + top_p=args.top_p) + print(reply) + else: + chat(model) \ No newline at end of file diff --git a/engine/fine-tune/data-set.py b/engine/fine-tune/data-set.py new file mode 100644 index 0000000..5374c56 --- /dev/null +++ b/engine/fine-tune/data-set.py @@ -0,0 +1,76 @@ +import os +import sys +import argparse +from datasets import load_dataset + +TARGET_MB = 30 +OUTPUT_FILE = "input.txt" + +DATASETS = { + "alpaca": ("yahma/alpaca-cleaned", None, "train"), + "dolly": ("databricks/databricks-dolly-15k", None, "train"), + "tinystories": ("roneneldan/TinyStories", None, "train"), + "wikitext": ("Salesforce/wikitext", "wikitext-103-raw-v1", "train"), + "oasst": ("OpenAssistant/oasst1", None, "train"), + "gsm8k": ("openai/gsm8k", "main", "train"), +} + +def row_to_text(row): + for field in ("text", "code", "content", "response", "output"): + if row.get(field, "").strip(): + return row[field].strip() + "\n\n" + parts = [] + if row.get("instruction", "").strip(): + parts.append("### Instruction:\n" + row["instruction"].strip()) + if row.get("input", "").strip(): + parts.append("### Input:\n" + row["input"].strip()) + if row.get("output", "").strip(): + parts.append("### Response:\n" + row["output"].strip()) + if parts: + return "\n\n".join(parts) + "\n\n" + for v in row.values(): + if isinstance(v, str) and v.strip(): + return v.strip() + "\n\n" + return "" + +def download(dataset_key, target_mb, output_file): + target_bytes = target_mb * 1024 * 1024 + + if dataset_key not in DATASETS: + print(f"Unknown dataset '{dataset_key}'. 
Choose from: {', '.join(DATASETS)}") + sys.exit(1) + + hf_id, config, split = DATASETS[dataset_key] + print(f"Downloading '{dataset_key}' → {output_file} (target: {target_mb} MB)") + + load_kwargs = dict(split=split, streaming=True, trust_remote_code=True) + if config: + load_kwargs["name"] = config + + ds = load_dataset(hf_id, **load_kwargs) + + written = 0 + rows = 0 + with open(output_file, "w", encoding="utf-8") as f: + for row in ds: + text = row_to_text(row) + if not text: + continue + f.write(text) + written += len(text.encode()) + rows += 1 + if written >= target_bytes: + break + + print(f"Done. {rows} rows | {written / 1024 / 1024:.2f} MB → {output_file}") + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", default="alpaca", choices=DATASETS.keys()) + parser.add_argument("--mb", type=int, default=TARGET_MB) + parser.add_argument("--output", default=OUTPUT_FILE) + args = parser.parse_args() + download(args.dataset, args.mb, args.output) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/engine/fine-tune/main.py b/engine/fine-tune/main.py new file mode 100644 index 0000000..64b655c --- /dev/null +++ b/engine/fine-tune/main.py @@ -0,0 +1,204 @@ +import torch +import torch.nn as nn +from torch.nn import functional as F +import time +import tiktoken +from pathlib import Path + +# Training configuration +script_dir = Path(__file__).parent +file_path = script_dir / "input.txt" +model_path=script_dir / 'best_model.pt' +batch_size = 16 +block_size = 32 +max_iters = 2000 +eval_interval = 100 +learning_rate = 1e-3 +device = 'cuda' if torch.cuda.is_available() else 'cpu' +eval_iters = 20 +n_embd = 64 +n_head = 4 +n_layer = 4 +dropout = 0.1 + +# Tokenizer setup +tokenizer = tiktoken.get_encoding("gpt2") +vocab_size = tokenizer.n_vocab + +# Model definition (minimal GPT) +class Head(nn.Module): + def __init__(self, head_size): + super().__init__() + self.key = nn.Linear(n_embd, head_size, bias=False) + 
self.query = nn.Linear(n_embd, head_size, bias=False) + self.value = nn.Linear(n_embd, head_size, bias=False) + self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size))) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + _, T, _ = x.shape + k = self.key(x) + q = self.query(x) + wei = q @ k.transpose(-2, -1) * k.shape[-1]**-0.5 + wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) + wei = F.softmax(wei, dim=-1) + wei = self.dropout(wei) + return wei @ self.value(x) + +class MultiHeadAttention(nn.Module): + def __init__(self, num_heads, head_size): + super().__init__() + self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)]) + self.proj = nn.Linear(head_size * num_heads, n_embd) + self.dropout = nn.Dropout(dropout) + + def forward(self, x): + out = torch.cat([h(x) for h in self.heads], dim=-1) + return self.dropout(self.proj(out)) + +class FeedForward(nn.Module): + def __init__(self, n_embd): + super().__init__() + self.net = nn.Sequential( + nn.Linear(n_embd, 4 * n_embd), + nn.ReLU(), + nn.Linear(4 * n_embd, n_embd), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + +class Block(nn.Module): + def __init__(self, n_embd, n_head): + super().__init__() + head_size = n_embd // n_head + self.sa = MultiHeadAttention(n_head, head_size) + self.ffwd = FeedForward(n_embd) + self.ln1 = nn.LayerNorm(n_embd) + self.ln2 = nn.LayerNorm(n_embd) + + def forward(self, x): + x = x + self.sa(self.ln1(x)) + x = x + self.ffwd(self.ln2(x)) + return x + +class GPTLanguageModel(nn.Module): + def __init__(self): + super().__init__() + self.token_embedding_table = nn.Embedding(vocab_size, n_embd) + self.position_embedding_table = nn.Embedding(block_size, n_embd) + self.blocks = nn.Sequential(*[Block(n_embd, n_head) for _ in range(n_layer)]) + self.ln_f = nn.LayerNorm(n_embd) + self.lm_head = nn.Linear(n_embd, vocab_size) + + def forward(self, idx, targets=None): + B, T = idx.shape + tok_emb = 
self.token_embedding_table(idx) + pos_emb = self.position_embedding_table(torch.arange(T, device=device)) + x = tok_emb + pos_emb + x = self.blocks(x) + x = self.ln_f(x) + logits = self.lm_head(x) + + if targets is None: + loss = None + else: + B, T, C = logits.shape + logits = logits.view(B*T, C) + targets = targets.view(B*T) + loss = F.cross_entropy(logits, targets) + + return logits, loss + +# Data loading function +def get_batch(split): + data = train_data if split == 'train' else val_data + ix = torch.randint(len(data) - block_size, (batch_size,)) + x = torch.stack([data[i:i+block_size] for i in ix]) + y = torch.stack([data[i+1:i+block_size+1] for i in ix]) + x, y = x.to(device), y.to(device) + return x, y + +@torch.no_grad() +def estimate_loss(): + out = {} + model.eval() + for split in ['train', 'val']: + losses = torch.zeros(eval_iters) + for k in range(eval_iters): + X, Y = get_batch(split) + logits, loss = model(X, Y) + losses[k] = loss.item() + out[split] = losses.mean() + model.train() + return out + +# Load your dataset - REPLACE 'input.txt' with your actual data file +print("Loading data...") +with open(file_path, 'r', encoding='utf-8') as f: + text = f.read() + +# Encode using tiktoken +data = torch.tensor(tokenizer.encode(text), dtype=torch.long) +n = int(0.9 * len(data)) +train_data = data[:n] +val_data = data[n:] +print(f"Dataset: {len(data):,} tokens | Train: {len(train_data):,} | Val: {len(val_data):,}") + +# Initialize model +print(f"Initializing model with vocab_size={vocab_size}") +model = GPTLanguageModel().to(device) + +# Load existing weights +print("Loading weights from best_model.pt...") +checkpoint = torch.load(model_path, map_location=device) +if isinstance(checkpoint, dict): + model.load_state_dict(checkpoint['model'] if 'model' in checkpoint else checkpoint) +else: + model.load_state_dict(checkpoint) +print(f"Weights loaded successfully") + +# Optimizer +optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) + +# 
Training loop +print(f"\nStarting fine-tuning on {device}") +print(f"{'Step':<10} {'Train Loss':<12} {'Val Loss':<12} {'Time (ms)':<12} {'Tok/s':<10}") +print("-" * 66) + +start_time = time.time() +for iter in range(max_iters): + + # Evaluate + if iter % eval_interval == 0 or iter == max_iters - 1: + losses = estimate_loss() + elapsed = (time.time() - start_time) * 1000 + tokens_per_sec = (batch_size * block_size * eval_interval) / ((time.time() - start_time) if iter > 0 else 1) + + print(f"{iter:<10} {losses['train']:.6f} {losses['val']:.6f} {elapsed:<12.2f} {tokens_per_sec:<10.0f}") + start_time = time.time() + + # Training step + xb, yb = get_batch('train') + logits, loss = model(xb, yb) + optimizer.zero_grad(set_to_none=True) + loss.backward() + optimizer.step() + +# Save fine-tuned model +print("\nSaving fine-tuned model...") +torch.save({ + 'model': model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'iter': max_iters, + 'config': { + 'vocab_size': vocab_size, + 'n_embd': n_embd, + 'n_head': n_head, + 'n_layer': n_layer, + 'block_size': block_size, + 'dropout': dropout, + } +}, 'finetuned_model.pt') +print(" Model saved to finetuned_model.pt") \ No newline at end of file diff --git a/engine/fineweb_dataset.py b/engine/fineweb_dataset.py index 0a9f86e..9a0107f 100644 --- a/engine/fineweb_dataset.py +++ b/engine/fineweb_dataset.py @@ -36,10 +36,10 @@ def download_fineweb_sample(output_dir="engine", target_size_mb=30): current_bytes += sample_size if (i + 1) % 100 == 0: - print(f"Collected {i + 1} samples (~{current_bytes / (1024*1024):.2f} MB)") + print(f"Collected {i + 1} samples ({current_bytes / (1024*1024):.2f} MB)") print(f"\nDownloaded {len(samples)} samples ({current_bytes / (1024*1024):.2f} MB)") - output_file = os.path.join(output_dir, "fineweb_30mb.txt") + output_file = os.path.join(output_dir, "input.txt") with open(output_file, 'w', encoding='utf-8') as f: for sample in samples: f.write(sample['text']) @@ -54,7 +54,7 @@ def 
download_fineweb_sample(output_dir="engine", target_size_mb=30): if __name__ == "__main__": try: download_fineweb_sample() - print("\n✓ Download completed successfully!") + print("\nDownload completed successfully!") except Exception as e: print(f"\ Error: {e}") print("\nMake sure you have the 'datasets' library installed:") diff --git a/engine/main.py b/engine/main.py index 531728f..cffb4cb 100644 --- a/engine/main.py +++ b/engine/main.py @@ -3,6 +3,7 @@ from torch.nn import functional as F import time import sys +import os from pathlib import Path import tiktoken @@ -64,7 +65,7 @@ def success(msg): log(f" ok {msg}") # CONFIGURATION -cleaned_path = SCRIPT_DIR / "input.txt" +cleaned_path = Path(os.environ.get("data", SCRIPT_DIR / "input.txt")) train_split = 0.9 seed = 1337 @@ -384,4 +385,4 @@ def generate(self, idx, max_new_tokens): row("Total", f"{int(wall_clock // 60)}m {int(wall_clock % 60):02d}s", "", TICK) rule() blank() -log(f"{DOUBLE}\n") \ No newline at end of file +log(f"{DOUBLE}\n") diff --git a/frontend/.env.example b/frontend/.env.example new file mode 100644 index 0000000..4558365 --- /dev/null +++ b/frontend/.env.example @@ -0,0 +1 @@ +VITE_API_BASE_URL=http://localhost:3001 diff --git a/frontend/index.html b/frontend/index.html index c92d184..c694ea2 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -3,6 +3,14 @@ + + + + + Quadtrix.cpp Chat diff --git a/frontend/manifest.webmanifest b/frontend/manifest.webmanifest new file mode 100644 index 0000000..882922b --- /dev/null +++ b/frontend/manifest.webmanifest @@ -0,0 +1,20 @@ +{ + "name": "Quadtrix.cpp Chat", + "short_name": "Quadtrix", + "description": "Installable local chat interface for Quadtrix C++ and PyTorch model backends.", + "start_url": "/", + "scope": "/", + "display": "standalone", + "background_color": "#0a0a0a", + "theme_color": "#0a0a0a", + "orientation": "any", + "categories": ["developer", "productivity", "utilities"], + "icons": [ + { + "src": "/icon.svg", + "sizes": 
"any", + "type": "image/svg+xml", + "purpose": "any maskable" + } + ] +} diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 0000000..e91afb1 --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,2933 @@ +{ + "name": "quadtrix-chat", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "quadtrix-chat", + "version": "1.0.0", + "dependencies": { + "@tanstack/react-query": "^5.62.11", + "date-fns": "^4.1.0", + "framer-motion": "^11.15.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "zustand": "^5.0.2" + }, + "devDependencies": { + "@types/react": "^18.3.18", + "@types/react-dom": "^18.3.5", + "@vitejs/plugin-react": "^4.3.4", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.17", + "typescript": "^5.7.2", + "vite": "^6.0.7" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": 
"sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + 
"node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + 
"@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": 
"sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": 
">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": 
"sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": 
"https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + 
"integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.2.tgz", + "integrity": "sha512-dnlp69efPPg6Uaw2dVqzWRfAWRnYVb1XJ8CyyhIbZeaq4CA5/mLeZ1IEt9QqQxmbdvagjLIm2ZL8BxXv5lH4Yw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.2.tgz", + "integrity": "sha512-OqZTwDRDchGRHHm/hwLOL7uVPB9aUvI0am/eQuWMNyFHf5PSEQmyEeYYheA0EPPKUO/l0uigCp+iaTjoLjVoHg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.2.tgz", + "integrity": 
"sha512-UwRE7CGpvSVEQS8gUMBe1uADWjNnVgP3Iusyda1nSRwNDCsRjnGc7w6El6WLQsXmZTbLZx9cecegumcitNfpmA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.2.tgz", + "integrity": "sha512-gjEtURKLCC5VXm1I+2i1u9OhxFsKAQJKTVB8WvDAHF+oZlq0GTVFOlTlO1q3AlCTE/DF32c16ESvfgqR7343/g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.2.tgz", + "integrity": "sha512-Bcl6CYDeAgE70cqZaMojOi/eK63h5Me97ZqAQoh77VPjMysA/4ORQBRGo3rRy45x4MzVlU9uZxs8Uwy7ZaKnBw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.2.tgz", + "integrity": "sha512-LU+TPda3mAE2QB0/Hp5VyeKJivpC6+tlOXd1VMoXV/YFMvk/MNk5iXeBfB4MQGRWyOYVJ01625vjkr0Az98OJQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.2.tgz", + "integrity": "sha512-2QxQrM+KQ7DAW4o22j+XZ6RKdxjLD7BOWTP0Bv0tmjdyhXSsr2Ul1oJDQqh9Zf5qOwTuTc7Ek83mOFaKnodPjg==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.2", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.2.tgz", + "integrity": "sha512-TbziEu2DVsTEOPif2mKWkMeDMLoYjx95oESa9fkQQK7r/Orta0gnkcDpzwufEcAO2BLBsD7mZkXGFqEdMRRwfw==", + "cpu": [ + "arm" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.2.tgz", + "integrity": "sha512-bO/rVDiDUuM2YfuCUwZ1t1cP+/yqjqz+Xf2VtkdppefuOFS2OSeAfgafaHNkFn0t02hEyXngZkxtGqXcXwO8Rg==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.2.tgz", + "integrity": "sha512-hr26p7e93Rl0Za+JwW7EAnwAvKkehh12BU1Llm9Ykiibg4uIr2rbpxG9WCf56GuvidlTG9KiiQT/TXT1yAWxTA==", + "cpu": [ + "arm64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.2.tgz", + "integrity": "sha512-pOjB/uSIyDt+ow3k/RcLvUAOGpysT2phDn7TTUB3n75SlIgZzM6NKAqlErPhoFU+npgY3/n+2HYIQVbF70P9/A==", + "cpu": [ + "loong64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.2.tgz", + "integrity": "sha512-2/w+q8jszv9Ww1c+6uJT3OwqhdmGP2/4T17cu8WuwyUuuaCDDJ2ojdyYwZzCxx0GcsZBhzi3HmH+J5pZNXnd+Q==", + "cpu": [ + 
"loong64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.2.tgz", + "integrity": "sha512-11+aL5vKheYgczxtPVVRhdptAM2H7fcDR5Gw4/bTcteuZBlH4oP9f5s9zYO9aGZvoGeBpqXI/9TZZihZ609wKw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.2.tgz", + "integrity": "sha512-i16fokAGK46IVZuV8LIIwMdtqhin9hfYkCh8pf8iC3QU3LpwL+1FSFGej+O7l3E/AoknL6Dclh2oTdnRMpTzFQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.2.tgz", + "integrity": "sha512-49FkKS6RGQoriDSK/6E2GkAsAuU5kETFCh7pG4yD/ylj9rKhTmO3elsnmBvRD4PgJPds5W2PkhC82aVwmUcJ7A==", + "cpu": [ + "riscv64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.2.tgz", + "integrity": "sha512-mjYNkHPfGpUR00DuM1ZZIgs64Hpf4bWcz9Z41+4Q+pgDx73UwWdAYyf6EG/lRFldmdHHzgrYyge5akFUW0D3mQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.2", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.2.tgz", + "integrity": "sha512-ALyvJz965BQk8E9Al/JDKKDLH2kfKFLTGMlgkAbbYtZuJt9LU8DW3ZoDMCtQpXAltZxwBHevXz5u+gf0yA0YoA==", + "cpu": [ + "s390x" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.2.tgz", + "integrity": "sha512-UQjrkIdWrKI626Du8lCQ6MJp/6V1LAo2bOK9OTu4mSn8GGXIkPXk/Vsp4bLHCd9Z9Iz2OTEaokUE90VweJgIYQ==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.2.tgz", + "integrity": "sha512-bTsRGj6VlSdn/XD4CGyzMnzaBs9bsRxy79eTqTCBsA8TMIEky7qg48aPkvJvFe1HyzQ5oMZdg7AnVlWQSKLTnw==", + "cpu": [ + "x64" + ], + "dev": true, + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.2.tgz", + "integrity": "sha512-6d4Z3534xitaA1FcMWP7mQPq5zGwBmGbhphh2DwaA1aNIXUu3KTOfwrWpbwI4/Gr0uANo7NTtaykFyO2hPuFLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.2.tgz", + "integrity": "sha512-NetAg5iO2uN7eB8zE5qrZ3CSil+7IJt4WDFLcC75Ymywq1VZVD6qJ6EvNLjZ3rEm6gB7XW5JdT60c6MN35Z85Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ 
+ "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.2.tgz", + "integrity": "sha512-NCYhOotpgWZ5kdxCZsv6Iudx0wX8980Q/oW4pNFNihpBKsDbEA1zpkfxJGC0yugsUuyDZ7gL37dbzwhR0VI7pQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.2.tgz", + "integrity": "sha512-RXsaOqXxfoUBQoOgvmmijVxJnW2IGB0eoMO7F8FAjaj0UTywUO/luSqimWBJn04WNgUkeNhh7fs7pESXajWmkg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.2.tgz", + "integrity": "sha512-qdAzEULD+/hzObedtmV6iBpdL5TIbKVztGiK7O3/KYSf+HIzU257+MX1EXJcyIiDbMAqmbwaufcYPvyRryeZtA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.2.tgz", + "integrity": "sha512-Nd/SgG27WoA9e+/TdK74KnHz852TLa94ovOYySo/yMPuTmpckK/jIF2jSwS3g7ELSKXK13/cVdmg1Z/DaCWKxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@tanstack/query-core": { + "version": "5.100.6", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.100.6.tgz", + "integrity": "sha512-Os2CPUr98to98RYm+D4qGqGkiffn7MGSyl2547a4MljVkHE30AMJRqTiyCqBfMwzAx/I91vCkAxp5tHSla6Twg==", + "license": "MIT", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/tannerlinsley" + } + }, + "node_modules/@tanstack/react-query": { + "version": "5.100.6", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.100.6.tgz", + "integrity": "sha512-uVSrps0PV16Cxmcn2rvL+dUhwTpTUtiRW347AEeYxMZXO2pZe9ja7E24PAMGoQ5u2g89DD8u4QhOviBk+RN8RA==", + "license": "MIT", + "dependencies": { + "@tanstack/query-core": "5.100.6" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^18 || ^19" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": 
"sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/@types/react": { + "version": "18.3.28", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.28.tgz", + "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": 
"1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.5.0.tgz", + "integrity": "sha512-FMhOoZV4+qR6aTUALKX2rEqGG+oyATvwBt9IIzVR5rMa2HRWPkxf+P+PAJLD1I/H5/II+HuZcBJYEFBpq39ong==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.2", + "caniuse-lite": "^1.0.30001787", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + 
}, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.24", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.24.tgz", + "integrity": "sha512-I2NkZOOrj2XuguvWCK6OVh9GavsNjZjK908Rq3mIBK25+GD8vPX5w2WdxVqnQ7xx3SrZJiCiZFu+/Oz50oSYSA==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", + "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.10.12", + "caniuse-lite": "^1.0.30001782", + "electron-to-chromium": "^1.5.328", + "node-releases": "^2.0.36", + 
"update-browserslist-db": "^1.2.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001791", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001791.tgz", + "integrity": "sha512-yk0l/YSrOnFZk3UROpDLQD9+kC1l4meK/wed583AXrzoarMGJcbRi2Q4RaUYbKxYAsZ8sWmaSa/DsLmdBeI1vQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + 
"dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/date-fns": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-4.1.0.tgz", + "integrity": "sha512-Ukq0owbQXxa/U3EGtsdVBkR1w7KOQ5gIBqdH2hkvknzZPYvBxb/aa6E8L7tmjFtkwZBu3UXBbjIgPo/Ez4xaNg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.344", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.344.tgz", + "integrity": "sha512-4MxfbmNDm+KPh066EZy+eUnkcDPcZ35wNmOWzFuh/ijvHsve6kbLTLURy88uCNK5FbpN+yk2nQY6BYh1GEt+wg==", + "dev": true, + "license": "ISC" + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + 
"@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.20.1", + 
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/framer-motion": { + "version": "11.18.2", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.18.2.tgz", + "integrity": "sha512-5F5Och7wrvtLVElIpclDT0CBzMVg3dL22B64aZwHtsIY8RB4mXICLrkajK4G9R+ieSAGcgrLeae2SeUTg2pr6w==", + "license": "MIT", + "dependencies": { + "motion-dom": "^11.18.1", + "motion-utils": "^11.18.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@emotion/is-prop-valid": "*", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@emotion/is-prop-valid": { + "optional": true + }, + "react": { + "optional": true + }, + "react-dom": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + 
"hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/hasown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.3.tgz", + "integrity": "sha512-ej4AhfhfL2Q2zpMmLo7U1Uv9+PyhIZpgQLGT1F9miIGmiCJIoCgSmczFdrc97mWT4kVY72KA+WnnhJ5pghSvSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": 
"sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "license": "MIT", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/motion-dom": { + "version": "11.18.1", + "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-11.18.1.tgz", + "integrity": "sha512-g76KvA001z+atjfxczdRtw/RXOM3OMSdd1f4DL77qCTF/+avrRJiawSG4yDibEQ215sr9kpinSlX2pCTJ9zbhw==", + "license": "MIT", + "dependencies": { + "motion-utils": "^11.18.1" + } + }, + "node_modules/motion-utils": { + "version": "11.18.1", + "resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-11.18.1.tgz", + "integrity": "sha512-49Kt+HKjtbJKLtgO/LKj9Ld+6vw9BjH5d9sc40R/kVyH8GLAXgT42M2NnuPcJNuA3s9ZfZBUcwIgpmZWGEE+hA==", + "license": "MIT" + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": 
"sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/node-releases": { + "version": "2.0.38", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.38.tgz", + "integrity": "sha512-3qT/88Y3FbH/Kx4szpQQ4HzUbVrHPKTLVpVocKiLfoYvw9XSGOX2FmD2d6DrXbVYyAQTF2HeF6My8jmzx7/CRw==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": 
">= 6" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss": { + "version": "8.5.12", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.12.tgz", + "integrity": "sha512-W62t/Se6rA0Az3DfCL0AqJwXuKwBeYg6nOaIgzP+xZ7N5BFCI7DYi1qs6ygUYT6rvfi6t9k65UMLJC+PHZpDAA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": 
"https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + 
"peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + 
"node_modules/resolve": { + "version": "1.22.12", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.12.tgz", + "integrity": "sha512-TyeJ1zif53BPfHootBGwPRYT1RUt6oGWsaQr8UyZW/eAm9bKoijtvruSDEmZHm92CwS9nj7/fWttqPCgzep8CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rollup": { + "version": "4.60.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.2.tgz", + "integrity": "sha512-J9qZyW++QK/09NyN/zeO0dG/1GdGfyp9lV8ajHnRVLfo/uFsbji5mHnDgn/qYdUHyCkM2N+8VyspgZclfAh0eQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.2", + "@rollup/rollup-android-arm64": "4.60.2", + "@rollup/rollup-darwin-arm64": "4.60.2", + "@rollup/rollup-darwin-x64": "4.60.2", + "@rollup/rollup-freebsd-arm64": "4.60.2", + "@rollup/rollup-freebsd-x64": "4.60.2", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.2", + "@rollup/rollup-linux-arm-musleabihf": "4.60.2", + "@rollup/rollup-linux-arm64-gnu": "4.60.2", + "@rollup/rollup-linux-arm64-musl": "4.60.2", + "@rollup/rollup-linux-loong64-gnu": "4.60.2", + "@rollup/rollup-linux-loong64-musl": "4.60.2", + "@rollup/rollup-linux-ppc64-gnu": "4.60.2", + 
"@rollup/rollup-linux-ppc64-musl": "4.60.2", + "@rollup/rollup-linux-riscv64-gnu": "4.60.2", + "@rollup/rollup-linux-riscv64-musl": "4.60.2", + "@rollup/rollup-linux-s390x-gnu": "4.60.2", + "@rollup/rollup-linux-x64-gnu": "4.60.2", + "@rollup/rollup-linux-x64-musl": "4.60.2", + "@rollup/rollup-openbsd-x64": "4.60.2", + "@rollup/rollup-openharmony-arm64": "4.60.2", + "@rollup/rollup-win32-arm64-msvc": "4.60.2", + "@rollup/rollup-win32-ia32-msvc": "4.60.2", + "@rollup/rollup-win32-x64-gnu": "4.60.2", + "@rollup/rollup-win32-x64-msvc": "4.60.2", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "license": "MIT", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": 
"^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": 
"4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": 
true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.2.tgz", + "integrity": "sha512-2N/55r4JDJ4gdrCvGgINMy+HH3iRpNIz8K6SFwVsA+JbQScLiC+clmAxBgwiSPgcG9U15QmvqCGWzMbqda5zGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + 
"stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/vite/node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/zustand": { + "version": "5.0.12", + "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.12.tgz", + "integrity": "sha512-i77ae3aZq4dhMlRhJVCYgMLKuSiZAaUPAct2AksxQ+gOtimhGMdXljRT21P5BNpeT4kXlLIckvkPM029OljD7g==", + "license": "MIT", + "engines": { + "node": ">=12.20.0" + }, + "peerDependencies": { + "@types/react": ">=18.0.0", + "immer": ">=9.0.6", + "react": ">=18.0.0", + "use-sync-external-store": ">=1.2.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "immer": { + "optional": true + }, + "react": { + "optional": true + }, + "use-sync-external-store": { + "optional": true + } + } + } + } +} diff --git 
a/frontend/package.json b/frontend/package.json new file mode 100644 index 0000000..37aa936 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,29 @@ +{ + "name": "quadtrix-chat", + "version": "1.0.0", + "private": true, + "type": "module", + "scripts": { + "dev": "vite --host 0.0.0.0", + "build": "tsc && vite build", + "preview": "vite preview" + }, + "dependencies": { + "@tanstack/react-query": "^5.62.11", + "date-fns": "^4.1.0", + "framer-motion": "^11.15.0", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "zustand": "^5.0.2" + }, + "devDependencies": { + "@types/react": "^18.3.18", + "@types/react-dom": "^18.3.5", + "@vitejs/plugin-react": "^4.3.4", + "autoprefixer": "^10.4.20", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.17", + "typescript": "^5.7.2", + "vite": "^6.0.7" + } +} diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js new file mode 100644 index 0000000..2aa7205 --- /dev/null +++ b/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/frontend/public/icon.svg b/frontend/public/icon.svg new file mode 100644 index 0000000..43d7866 --- /dev/null +++ b/frontend/public/icon.svg @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/frontend/public/manifest.webmanifest b/frontend/public/manifest.webmanifest new file mode 100644 index 0000000..882922b --- /dev/null +++ b/frontend/public/manifest.webmanifest @@ -0,0 +1,20 @@ +{ + "name": "Quadtrix.cpp Chat", + "short_name": "Quadtrix", + "description": "Installable local chat interface for Quadtrix C++ and PyTorch model backends.", + "start_url": "/", + "scope": "/", + "display": "standalone", + "background_color": "#0a0a0a", + "theme_color": "#0a0a0a", + "orientation": "any", + "categories": ["developer", "productivity", "utilities"], + "icons": [ + { + "src": "/icon.svg", + "sizes": "any", + "type": "image/svg+xml", + "purpose": "any maskable" + } + ] +} diff --git a/frontend/public/sw.js 
b/frontend/public/sw.js new file mode 100644 index 0000000..4f5d599 --- /dev/null +++ b/frontend/public/sw.js @@ -0,0 +1,52 @@ +const CACHE_NAME = 'quadtrix-chat-v1'; +const APP_SHELL = ['/', '/manifest.webmanifest', '/icon.svg']; + +self.addEventListener('install', (event) => { + event.waitUntil(caches.open(CACHE_NAME).then((cache) => cache.addAll(APP_SHELL))); + self.skipWaiting(); +}); + +self.addEventListener('activate', (event) => { + event.waitUntil( + caches.keys().then((keys) => + Promise.all(keys.filter((key) => key !== CACHE_NAME).map((key) => caches.delete(key))) + ) + ); + self.clients.claim(); +}); + +self.addEventListener('fetch', (event) => { + if (event.request.method !== 'GET') { + return; + } + + const url = new URL(event.request.url); + + const isApiRequest = + url.pathname.startsWith('/api/') || + url.pathname === '/generate' || + url.pathname === '/health' || + url.pathname === '/status'; + + if (isApiRequest) { + return; + } + + event.respondWith( + caches.match(event.request).then((cached) => { + if (cached) { + return cached; + } + return fetch(event.request) + .then((response) => { + if (!response || response.status !== 200 || response.type === 'opaque') { + return response; + } + const copy = response.clone(); + caches.open(CACHE_NAME).then((cache) => cache.put(event.request, copy)); + return response; + }) + .catch(() => caches.match('/')); + }) + ); +}); diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx new file mode 100644 index 0000000..6141dcf --- /dev/null +++ b/frontend/src/App.tsx @@ -0,0 +1,39 @@ +import { useCallback } from "react"; + +import { ChatView } from "./components/chat/ChatView"; +import { AppLayout } from "./components/layout/AppLayout"; +import { useKeyboardShortcut } from "./hooks/useKeyboardShortcut"; +import { useCreateSession } from "./api/sessions"; +import { useSessionStore } from "./store/sessionStore"; +import { useSettingsStore } from "./store/settingsStore"; + +export default function App() { + const 
clearMessages = useSessionStore((state) => state.clearMessages); + const setActiveSession = useSessionStore((state) => state.setActiveSession); + const setMessages = useSessionStore((state) => state.setMessages); + const setSettingsOpen = useSettingsStore((state) => state.setSettingsOpen); + const createSession = useCreateSession(); + + const newConversation = useCallback(() => { + createSession.mutate(undefined, { + onSuccess: (session) => { + setActiveSession(session.id); + setMessages([]); + }, + }); + }, [createSession, setActiveSession, setMessages]); + + const openSettings = useCallback(() => setSettingsOpen(true), [setSettingsOpen]); + const closeSettings = useCallback(() => setSettingsOpen(false), [setSettingsOpen]); + + useKeyboardShortcut(["ctrl", "l"], clearMessages); + useKeyboardShortcut(["ctrl", "n"], newConversation); + useKeyboardShortcut(["ctrl", ","], openSettings); + useKeyboardShortcut(["escape"], closeSettings); + + return ( + + + + ); +} diff --git a/frontend/src/components/chat/ChatView.tsx b/frontend/src/components/chat/ChatView.tsx new file mode 100644 index 0000000..68c29eb --- /dev/null +++ b/frontend/src/components/chat/ChatView.tsx @@ -0,0 +1,107 @@ +import { useEffect, useState } from "react"; + +import { ApiClientError } from "../../api/client"; +import { useSendMessage } from "../../api/chat"; +import { useSessionMessages } from "../../api/sessions"; +import { useHealth } from "../../api/health"; +import { useSessionStore } from "../../store/sessionStore"; +import { useSettingsStore } from "../../store/settingsStore"; +import type { Message } from "../../types"; +import { EmptyState } from "./EmptyState"; +import { MessageList } from "./MessageList"; +import { InputBar } from "../input/InputBar"; + +export function ChatView() { + const [draft, setDraft] = useState(""); + const activeSessionId = useSessionStore((state) => state.activeSessionId); + const messages = useSessionStore((state) => state.messages); + const setActiveSession = 
useSessionStore((state) => state.setActiveSession); + const setMessages = useSessionStore((state) => state.setMessages); + const addMessage = useSessionStore((state) => state.addMessage); + const replaceMessage = useSessionStore((state) => state.replaceMessage); + const maxTokens = useSettingsStore((state) => state.maxTokens); + const temperature = useSettingsStore((state) => state.temperature); + const modelBackend = useSettingsStore((state) => state.modelBackend); + const sendMessage = useSendMessage(); + const { data: health } = useHealth(); + const { data: fetchedMessages } = useSessionMessages(activeSessionId); + const online = modelBackend === "cpp" ? health?.cpp_server === "ok" : health?.torch_model === "ok"; + + useEffect(() => { + if (fetchedMessages) { + setMessages(fetchedMessages); + } + }, [fetchedMessages, setMessages]); + + const now = (): string => new Date().toISOString(); + + const handleSend = (prompt: string): void => { + const sessionId = activeSessionId ?? crypto.randomUUID(); + setActiveSession(sessionId); + const userMessage: Message = { + id: `local-${crypto.randomUUID()}`, + session_id: sessionId, + role: "user", + text: prompt, + chars: 0, + seconds: 0, + created_at: now(), + }; + const pendingId = `pending-${crypto.randomUUID()}`; + const pendingMessage: Message = { + id: pendingId, + session_id: sessionId, + role: "assistant", + text: "", + chars: 0, + seconds: 0, + created_at: now(), + pending: true, + }; + addMessage(userMessage); + addMessage(pendingMessage); + sendMessage.mutate( + { session_id: sessionId, prompt, max_tokens: maxTokens, temperature, stream: false, model_backend: modelBackend }, + { + onSuccess: (response) => { + setActiveSession(response.session_id); + replaceMessage(pendingId, { + id: response.id, + session_id: response.session_id, + role: "assistant", + text: response.text, + prompt: response.prompt, + chars: response.chars, + seconds: response.seconds, + created_at: response.created_at, + }); + }, + onError: 
(error) => { + const message = + error instanceof ApiClientError + ? error.apiError.message + : "Could not reach the selected model. Check the C++ server or engine checkpoint."; + replaceMessage(pendingId, { + ...pendingMessage, + text: message, + pending: false, + error: "model_unavailable", + }); + }, + }, + ); + }; + + return ( +
+ {messages.length === 0 ? : } + +
+ ); +} diff --git a/frontend/src/components/chat/EmptyState.tsx b/frontend/src/components/chat/EmptyState.tsx new file mode 100644 index 0000000..ce75d9a --- /dev/null +++ b/frontend/src/components/chat/EmptyState.tsx @@ -0,0 +1,15 @@ +export function EmptyState() { + return ( +
+
+
+ Quadtrix.cpp icon +
+
+

Quadtrix.cpp

+

Minimal local chat interface. Start typing below to begin.

+
+
+
+ ); +} diff --git a/frontend/src/components/chat/MessageAvatar.tsx b/frontend/src/components/chat/MessageAvatar.tsx new file mode 100644 index 0000000..25373d5 --- /dev/null +++ b/frontend/src/components/chat/MessageAvatar.tsx @@ -0,0 +1,20 @@ +import type { Role } from "../../types"; + +interface MessageAvatarProps { + role: Role; +} + +export function MessageAvatar({ role }: MessageAvatarProps) { + const isUser = role === "user"; + return ( +
+ {isUser ? "You" : "Q"} +
+ ); +} diff --git a/frontend/src/components/chat/MessageList.tsx b/frontend/src/components/chat/MessageList.tsx new file mode 100644 index 0000000..e38a0af --- /dev/null +++ b/frontend/src/components/chat/MessageList.tsx @@ -0,0 +1,20 @@ +import { useAutoScroll } from "../../hooks/useAutoScroll"; +import type { Message } from "../../types"; +import { MessageRow } from "./MessageRow"; + +interface MessageListProps { + messages: Message[]; +} + +export function MessageList({ messages }: MessageListProps) { + const scrollRef = useAutoScroll(messages.length); + return ( +
+
+ {messages.map((message) => ( + + ))} +
+
+ ); +} diff --git a/frontend/src/components/chat/MessageRow.tsx b/frontend/src/components/chat/MessageRow.tsx new file mode 100644 index 0000000..372d585 --- /dev/null +++ b/frontend/src/components/chat/MessageRow.tsx @@ -0,0 +1,64 @@ +import { motion } from "framer-motion"; +import { useState } from "react"; + +import { formatRelativeTime } from "../../utils/time"; +import type { Message } from "../../types"; +import { MessageAvatar } from "./MessageAvatar"; +import { ThinkingIndicator } from "./ThinkingIndicator"; + +interface MessageRowProps { + message: Message; +} + +export function MessageRow({ message }: MessageRowProps) { + const [copied, setCopied] = useState(false); + const isUser = message.role === "user"; + + const copyText = async (): Promise => { + try { + await navigator.clipboard.writeText(message.text); + setCopied(true); + window.setTimeout(() => setCopied(false), 1200); + } catch (error) { + setCopied(false); + } + }; + + return ( + + {!isUser && } +
+
+ {isUser ? "You" : "Quadtrix"} + {formatRelativeTime(message.created_at)} + {!isUser && !message.pending && ( + + )} +
+
+ {message.pending ? : {message.text}} +
+
+ {isUser && } +
+ ); +} diff --git a/frontend/src/components/chat/StarterPrompts.tsx b/frontend/src/components/chat/StarterPrompts.tsx new file mode 100644 index 0000000..fb10a22 --- /dev/null +++ b/frontend/src/components/chat/StarterPrompts.tsx @@ -0,0 +1,22 @@ +interface StarterPromptsProps { + onSelect: (prompt: string) => void; +} + +const prompts = ["Once upon a time", "Timmy is a", "hi how are you", "The little door opened"]; + +export function StarterPrompts({ onSelect }: StarterPromptsProps) { + return ( +
+ {prompts.map((prompt) => ( + + ))} +
+ ); +} diff --git a/frontend/src/components/chat/ThinkingIndicator.tsx b/frontend/src/components/chat/ThinkingIndicator.tsx new file mode 100644 index 0000000..e83d0f5 --- /dev/null +++ b/frontend/src/components/chat/ThinkingIndicator.tsx @@ -0,0 +1,12 @@ +export function ThinkingIndicator() { + return ( +
+ Quadtrix is thinking + + + + + +
+ ); +} diff --git a/frontend/src/components/input/CharCounter.tsx b/frontend/src/components/input/CharCounter.tsx new file mode 100644 index 0000000..5ddf619 --- /dev/null +++ b/frontend/src/components/input/CharCounter.tsx @@ -0,0 +1,9 @@ +interface CharCounterProps { + count: number; + max: number; +} + +export function CharCounter({ count, max }: CharCounterProps) { + const over = count > max; + return {count}/{max}; +} diff --git a/frontend/src/components/input/InputBar.tsx b/frontend/src/components/input/InputBar.tsx new file mode 100644 index 0000000..bcfc22b --- /dev/null +++ b/frontend/src/components/input/InputBar.tsx @@ -0,0 +1,77 @@ +import { useEffect, useRef, useState } from "react"; + +import { CharCounter } from "./CharCounter"; + +interface InputBarProps { + disabled: boolean; + isSending: boolean; + onSend: (prompt: string) => void; + initialValue?: string; + onDraftChange?: (value: string) => void; +} + +export function InputBar({ disabled, isSending, onSend, initialValue = "", onDraftChange }: InputBarProps) { + const [value, setValue] = useState(initialValue); + const ref = useRef(null); + + useEffect(() => { + setValue(initialValue); + }, [initialValue]); + + useEffect(() => { + onDraftChange?.(value); + }, [onDraftChange, value]); + + useEffect(() => { + if (!ref.current) { + return; + } + ref.current.style.height = "auto"; + ref.current.style.height = `${Math.min(ref.current.scrollHeight, 120)}px`; + }, [value]); + + const submit = (): void => { + const prompt = value.trim(); + if (!prompt || disabled || isSending) { + return; + } + setValue(""); + onSend(prompt); + }; + + return ( +
+
+
+