diff --git a/.gitignore b/.gitignore index fe58f6e91f..9d27c4ccb8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ cabal.project.local~ .hpc/ *.tix .coverage + diff --git a/2026-02-04-xftp-web-persistent-connections.md b/2026-02-04-xftp-web-persistent-connections.md new file mode 100644 index 0000000000..6aa8c2f51c --- /dev/null +++ b/2026-02-04-xftp-web-persistent-connections.md @@ -0,0 +1,53 @@ +# XFTPClientAgent Pattern + +## TOC +1. Executive Summary +2. Changes: client.ts +3. Changes: agent.ts +4. Changes: test/browser.test.ts +5. Verification + +## Executive Summary + +Add `XFTPClientAgent` — a per-server connection pool matching the Haskell pattern. The agent caches `XFTPClient` instances by server URL. All orchestration functions (`uploadFile`, `downloadFile`, `deleteFile`) take `agent` as first parameter and use `getXFTPServerClient(agent, server)` instead of calling `connectXFTP` directly. Connections stay open on success; the caller creates and closes the agent. + +`connectXFTP` and `closeXFTP` stay exported (used by `XFTPWebTests.hs` Haskell tests). The `browserClients` hack, per-function `connections: Map`, and `getOrConnect` are deleted. + +## Changes: client.ts + +**Add** after types section: `XFTPClientAgent` interface, `newXFTPAgent`, `getXFTPServerClient`, `closeXFTPServerClient`, `closeXFTPAgent`. + +**Delete**: `browserClients` Map and all `isNode` browser-cache checks in `connectXFTP` and `closeXFTP`. + +**Revert `closeXFTP`** to unconditional `c.transport.close()` (browser transport.close() is already a no-op). + +`connectXFTP` stays exported (backward compat) but becomes a raw low-level function — no caching. + +## Changes: agent.ts + +**Imports**: replace `connectXFTP`/`closeXFTP` with `getXFTPServerClient`/`closeXFTPAgent` etc. + +**Re-export** from agent.ts: `newXFTPAgent`, `closeXFTPAgent`, `XFTPClientAgent`. + +**`uploadFile`**: add `agent: XFTPClientAgent` as first param. Replace `connectXFTP` → `getXFTPServerClient`. Remove `finally { closeXFTP }`. Pass `agent` to `uploadRedirectDescription`. + +**`uploadRedirectDescription`**: change from `(client, server, innerFd)` to `(agent, server, innerFd)`. Get client via `getXFTPServerClient`. + +**`downloadFile`**: add `agent` param. Delete local `connections: Map`. Replace `getOrConnect` → `getXFTPServerClient`. Remove finally cleanup. Pass `agent` to `downloadWithRedirect`. + +**`downloadWithRedirect`**: add `agent` param. Same replacements. Remove try/catch cleanup. Recursive call passes `agent`. + +**`deleteFile`**: add `agent` param. Same pattern. + +**Delete**: `getOrConnect` function entirely. + +## Changes: test/browser.test.ts + +Create agent before operations, pass to upload/download, close in finally. + +## Verification + +1. `npx vitest --run` — browser round-trip test passes +2. No remaining `browserClients`, `getOrConnect`, or per-function `connections: Map` locals +3. `connectXFTP` and `closeXFTP` still exported (XFTPWebTests.hs compat) +4. All orchestration functions take `agent` as first param diff --git a/contributing/CODE.md b/contributing/CODE.md index c644a16064..ab5d7efccf 100644 --- a/contributing/CODE.md +++ b/contributing/CODE.md @@ -2,7 +2,17 @@ This file provides guidance on coding style and approaches and on building the code. -## Code Style and Formatting +## Code Security + +When designing code and planning implementations: +- Apply adversarial thinking, and consider what may happen if one of the communicating parties is malicious. 
+- Formulate an explicit threat model for each change - who can do which undesirable things and under which circumstances. + +## Code Quality Standards + +Haskell client and server code serves as system specification, not just implementation — we use type-driven design to reflect the business domain in types. Quality, conciseness, and clarity of Haskell code are critical. + +## Code Style, Formatting and Approaches The project uses **fourmolu** for Haskell code formatting. Configuration is in `fourmolu.yaml`. @@ -41,6 +51,11 @@ Some files that use CPP language extension cannot be formatted as a whole, so in - Never do refactoring unless it substantially reduces cost of solving the current problem, including the cost of refactoring - Aim to minimize the code changes - do what is minimally required to solve users' problems +**Document and code structure:** +- **Never move existing code or sections around** - add new content at appropriate locations without reorganizing existing structure. +- When adding new sections to documents, continue the existing numbering scheme. +- Minimize diff size - prefer small, targeted changes over reorganization. + **Code analysis and review:** - Trace data flows end-to-end: from origin, through storage/parameters, to consumption. Flag values that are discarded and reconstructed from partial data (e.g. extracted from a URI missing original fields) — this is usually a bug. - Read implementations of called functions, not just signatures — if duplication involves a called function, check whether decomposing it resolves the duplication. diff --git a/notes-flow.txt b/notes-flow.txt deleted file mode 100644 index 93f9845092..0000000000 --- a/notes-flow.txt +++ /dev/null @@ -1,23 +0,0 @@ -common: - corrId - random BS, used as CbNonce - entityId - p2r tlsUniq - -# setup -s->p: "proxy", uri, auth? - # unless connected - p->r: "p_handshake" - p<-r: "r_key", tls-signed dh pub -s<-r: "r_key", tls-signed dh pub # reply entityId contains tlsUniq - -# working -s ; generate random dh priv, make shared secret -s->p: s2r("forward", random dh pub, SEND command blob) - p->r: p2r("forward", random dh pub, s2r("forward", ...))) - r->c@ "msg", ... - p<-r: p2r("r_res", s2r("ok" / "error", error)) -s<-p@ s2r("ok" / "error", error) - -# expired - p<-r@ p2r("error", "key expired") -s<-p@ "error", "key expired" -s ; reconnect \ No newline at end of file diff --git a/rfcs/2026-01-30-send-file-page.md b/rfcs/2026-01-30-send-file-page.md new file mode 100644 index 0000000000..0e35d44994 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page.md @@ -0,0 +1,1081 @@ +# Send File Page — Web-based XFTP File Transfer + +## 1. Problem & Business Case + +There is no way to send or receive files using SimpleX without installing the app. A static web page that implements the XFTP protocol client-side would allow anyone with a browser to upload and download files via XFTP servers, promoting app adoption. + +**Business constraints:** +- Web page allows up to 100 MB uploads; app allows up to 1 GB. +- Page must promote app installation (e.g., banner, messaging around limits). + +**Security constraint:** +- The server hosting the page must never access file content or file descriptions. The file description is carried in the URL hash fragment (`#`), which browsers do not send to the server. +- The only way to compromise transfer security is page substitution (serving malicious JS). 
Mitigations: standard web security (HTTPS, CSP, SRI) and IPFS hosting with page fingerprints published in multiple independent locations. + +## 2. Design Overview + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Static web page (HTML + JS bundle) │ +│ ┌───────────────────────────────────────────────────────────┐ │ +│ │ TypeScript XFTP Client Library │ │ +│ │ ┌──────────┐ ┌──────────┐ ┌───────────┐ ┌─────────────┐ │ │ +│ │ │ Protocol │ │ Crypto │ │ Transport │ │ Description │ │ │ +│ │ │ Encoding │ │(libsodium│ │ (fetch │ │ (YAML parse │ │ │ +│ │ │ │ │ .js) │ │ API) │ │ + encode) │ │ │ +│ │ └──────────┘ └──────────┘ └───────────┘ └─────────────┘ │ │ +│ └───────────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────┘ + │ fetch() over HTTP/2 │ fetch() over HTTP/2 + ▼ ▼ +┌─────────────────┐ ┌─────────────────┐ +│ XFTP Server 1 │ │ XFTP Server 2 │ +│ (SNI→web cert) │ │ (SNI→web cert) │ +│ (+CORS headers) │ │ (+CORS headers) │ +└─────────────────┘ └─────────────────┘ +``` + +**Key decisions:** +- **Language:** TypeScript (readable, auditable, good ecosystem, Node.js for testing). +- **Crypto:** libsodium.js (WASM-compiled libsodium; provides XSalsa20-Poly1305, Ed25519, X25519). +- **Transport:** Browser `fetch()` API over HTTP/2 with `ReadableStream` for streaming. +- **No backend logic:** The page is entirely static. All XFTP operations happen client-side. + +## 3. Web Page UX + +### 3.1 Upload Flow + +1. **Landing state:** Drag-and-drop zone with centered upload icon and "Drop file here or click to upload" text. File size limit displayed ("Up to 100 MB — install SimpleX app for up to 1 GB"). Simple white background, no decoration. +2. **File selected:** Show file name and size. Begin upload immediately. +3. **Upload progress:** Large circular progress indicator (clockwise, starting from 3 o'clock position). Percentage in center. Cancel button below. +4. **Upload complete:** Show shareable link with copy button. QR code if link is short enough (≤ ~1000 chars). "Install SimpleX for larger files" CTA. + +### 3.2 Download Flow + +1. **Link opened:** Page parses hash fragment, shows file name and size. "Download" button. +2. **Download progress:** Same circular progress indicator as upload. +3. **Download complete:** Browser save dialog triggered (via Blob + download link, or File System Access API where available). + +### 3.3 Error States + +- File too large (> 100 MB): Show limit message with app install CTA. +- Server unreachable: Retry with exponential backoff, show error after exhausting retries. +- File expired: "This file is no longer available" message. +- Decryption failure: "File corrupted or link invalid" message. + +## 4. URL Scheme + +### 4.1 Format + +``` +https://example.com/file/# +``` + +- Hash fragment is never sent to the server. +- Compression: DEFLATE (raw, no gzip/zlib wrapper) — better ratio than LZW for structured text like YAML. +- Encoding: Base64url (RFC 4648 §5) — no `+`, `/`, `=`, or `%` characters. + +Alternative: LZW + base64url if DEFLATE proves problematic. Both should be evaluated. + +### 4.2 Redirect Mechanism + +For files with many chunks, the YAML file description can exceed a practical URL length. The threshold is ~600 bytes of compressed+encoded description (configurable). + +**Flow when description is too large:** +1. Serialize recipient file description to YAML. +2. Encrypt YAML using fresh key + nonce (same XSalsa20-Poly1305 as files). +3. 
Upload encrypted YAML as a single-chunk "file" to one randomly chosen XFTP server.
+4. Create redirect description pointing to this uploaded description.
+5. Encode redirect description into URL (always small — single chunk).
+
+**Download with redirect:**
+1. Parse URL → redirect description (has `redirect` field with `size` and `digest`).
+2. Download the description "file" using the single chunk reference.
+3. Decrypt → get full YAML description.
+4. Validate size and digest match redirect metadata.
+5. Proceed with normal download using full description.
+
+### 4.3 Estimated URL Lengths
+
+These estimates are preliminary and may be incorrect.
+
+| Scenario | Chunks | Compressed+encoded size | URL length |
+|----------|--------|------------------------|------------|
+| Small file (1 chunk, 1 server) | 1 | ~300 bytes | ~350 chars |
+| Medium file (5 chunks, 1 server) | 5 | ~500 bytes | ~550 chars |
+| Large file (25+ chunks) | 25 | Exceeds threshold → redirect | ~350 chars |
+
+## 5. TypeScript XFTP Client Library
+
+### 5.1 Module Structure
+
+```
+xftp-web/src/             # Separate npm project (see §12.19)
+├── protocol/
+│   ├── encoding.ts       # Binary encoding/decoding ← Simplex.Messaging.Encoding ✓
+│   ├── commands.ts       # XFTP commands + responses ← Simplex.FileTransfer.Protocol ✓
+│   ├── transmission.ts   # Transmission framing, signing, padding ✓
+│   ├── handshake.ts      # XFTP handshake (standard + web) ← FileTransfer.Transport ✓
+│   ├── address.ts        # XFTP server address parser ← Simplex.Messaging.Protocol ✓
+│   ├── chunks.ts         # Chunk sizes + splitting ← FileTransfer.Chunks + Client.hs ✓
+│   ├── client.ts         # Transport crypto (cbAuthenticate, transit encrypt/decrypt) ✓
+│   └── description.ts    # Types, YAML, validation, base64url ← FileTransfer.Description ✓
+├── crypto/
+│   ├── secretbox.ts      # XSalsa20-Poly1305 streaming encryption/decryption ✓
+│   ├── file.ts           # File-level encryption/decryption (encryptFile, decryptChunks) ✓
+│   ├── keys.ts           # Ed25519, X25519, Ed448 key generation and operations ✓
+│   ├── digest.ts         # SHA-256/SHA-512 hashing ✓
+│   ├── padding.ts        # Block padding/unpadding (2-byte length prefix + '#' fill) ✓
+│   └── identity.ts       # Web handshake identity proof verification (Ed25519/Ed448) ✓
+├── download.ts           # Download helper functions (DH, transit-decrypt, file-decrypt) ✓
+├── client.ts             # HTTP/2 XFTP client ← Simplex.FileTransfer.Client
+└── agent.ts              # Upload/download orchestration + URI ← FileTransfer.Client.Main
+```
+
+### 5.2 Binary Encoding
+
+The XFTP wire format uses a custom binary encoding (from `Simplex.Messaging.Encoding`). Key patterns:
+
+- **Length-prefixed bytestrings:** `<1-byte length>` (`ByteString`, max 255 bytes — used for entity IDs, short fields) or `<2-byte big-endian length>` (`Large`, max 65535 bytes — used for larger data).
+- **Transmission format:** `<signature> <corrId> <entityId> <command>`
+  - Fields separated by space (0x20).
+  - `signature`: Ed25519 signature over `(sessionId ++ corrId ++ entityId ++ encodedCommand)`.
+  - `corrId`: Correlation ID (arbitrary, echoed in response).
+  - `entityId`: File/chunk ID on server.
+  - Command: tag + space-separated fields.
+- **Padding:** 2-byte big-endian length prefix + message + `#` (0x23) fill to block size (16384 bytes).
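+
+To make these primitives concrete, a minimal TypeScript sketch of the length-prefixed encodings and block padding described above — function names follow §12.1/§12.7; signatures and error handling are illustrative, not the final implementation:
+
+```typescript
+// Sketch only: §5.2 wire primitives (names per §12.1/§12.7).
+function encodeBytes(bs: Uint8Array): Uint8Array {
+  // `ByteString` encoding: 1-byte length prefix, max 255 bytes.
+  if (bs.length > 255) throw new Error("encodeBytes: max 255 bytes");
+  const out = new Uint8Array(1 + bs.length);
+  out[0] = bs.length;
+  out.set(bs, 1);
+  return out;
+}
+
+function encodeLarge(bs: Uint8Array): Uint8Array {
+  // `Large` encoding: 2-byte big-endian length prefix, max 65535 bytes.
+  if (bs.length > 65535) throw new Error("encodeLarge: max 65535 bytes");
+  const out = new Uint8Array(2 + bs.length);
+  new DataView(out.buffer).setUint16(0, bs.length); // big-endian by default
+  out.set(bs, 2);
+  return out;
+}
+
+function pad(msg: Uint8Array, blockSize: number): Uint8Array {
+  // 2-byte big-endian length prefix + message + '#' (0x23) fill.
+  if (msg.length + 2 > blockSize) throw new Error("pad: message too large");
+  const out = new Uint8Array(blockSize).fill(0x23);
+  new DataView(out.buffer).setUint16(0, msg.length);
+  out.set(msg, 2);
+  return out;
+}
+```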
+ +### 5.3 Crypto Operations Catalog + +| Operation | Algorithm | Key Size | Nonce Size | Tag Size | Library | +|-----------|-----------|----------|------------|----------|---------| +| File encryption | XSalsa20-Poly1305 | 32 B | 24 B | 16 B | libsodium.js | +| File decryption | XSalsa20-Poly1305 | 32 B | 24 B | 16 B | libsodium.js | +| Transit decryption (download) | XSalsa20-Poly1305 (streaming: `cbInit` + `sbDecryptChunk`) | DH shared secret | 24 B | 16 B | libsodium.js | +| Command signing | Ed25519 | 64 B (private) | — | 64 B (sig) | libsodium.js | +| DH key exchange | X25519 | 32 B | — | — | libsodium.js | +| Chunk digest | SHA-256 | — | — | 32 B | Web Crypto API | +| File digest | SHA-512 | — | — | 64 B | Web Crypto API | +| Random bytes | ChaCha20-DRBG | — | — | — | libsodium.js `randombytes_buf` | + +**Streaming encryption detail:** + +The Haskell implementation uses a custom streaming wrapper over XSalsa20-Poly1305: +1. Initialize: `(xsalsa20_state, poly1305_state) = sbInit(key, nonce)` + - Generate 32-byte Poly1305 key from first XSalsa20 output block + - Initialize Poly1305 state with this key +2. Encrypt chunk: XOR plaintext with XSalsa20 keystream, update Poly1305 with ciphertext +3. Finalize: Compute 16-byte Poly1305 tag, append to stream + +This is NOT compatible with standard NaCl `crypto_secretbox` (see §11.2). The TypeScript implementation must reimplement the exact streaming logic using libsodium's low-level XSalsa20 and Poly1305 APIs. See §12.4 for the complete function mapping. + +### 5.4 Transport via fetch() + +Each XFTP command is an HTTP/2 POST request: + +```typescript +async function sendXFTPCommand( + serverUrl: string, + commandBlock: Uint8Array, // 16384 bytes, padded + fileChunk?: ReadableStream // optional, for FPUT +): Promise<{ responseBlock: Uint8Array; body?: ReadableStream }> { + + const bodyStream = fileChunk + ? concatStreams(streamFromBytes(commandBlock), fileChunk) + : streamFromBytes(commandBlock); + + const response = await fetch(serverUrl, { + method: 'POST', + body: bodyStream, + duplex: 'half', // Required for streaming request bodies + // No Content-Type header — binary protocol + }); + + const reader = response.body!.getReader(); + const responseBlock = await readExactly(reader, 16384); + const body = hasMoreData(reader) ? wrapAsStream(reader) : undefined; + + return { responseBlock, body }; +} +``` + +**Browser compatibility for streaming uploads:** +- Chrome 105+, Edge 105+: `fetch()` with `ReadableStream` body + `duplex: 'half'` +- Firefox 102+: Supported +- Safari 16.4+: Supported + +For older browsers, fall back to `ArrayBuffer` body (buffer entire chunk in memory). + +### 5.5 Upload Orchestration + +``` +1. Read file via File API (drag-drop or file picker) +2. Validate size ≤ 100 MB +3. Generate random SbKey (32 bytes) + CbNonce (24 bytes) +4. Create FileHeader { fileName } +5. Encrypt file (see §12.8 for algorithm detail): + a. Init streaming state: `sbInit(key, nonce)` + b. Encrypt `smpEncode(fileSize') <> headerBytes` where `fileSize'` = headerLen + originalFileSize + c. Encrypt file data in 65536-byte chunks (threaded state) + d. Encrypt `'#'` padding in 65536-byte chunks to fill `encSize - authTagSize - fileSize' - 8` + e. Finalize: `sbAuth(state)` → append 16-byte auth tag +6. Compute SHA-512 digest of encrypted data +7. Split into chunks using prepareChunkSizes algorithm: + - > 75% of 4MB → 4MB chunks + - > 75% of 1MB → 1MB + 4MB chunks + - Otherwise → 64KB + 256KB chunks +8. For each chunk (parallel, up to 8 concurrent): + a. 
Generate Ed25519 sender keypair
+   b. Generate Ed25519 recipient keypair (1 recipient for web)
+   c. Compute SHA-256 chunk digest
+   d. Connect to XFTP server (handshake if new connection)
+   e. Send FNEW { sndKey, size, digest } + recipient keys → receive (senderId, [recipientId])
+   f. Send FPUT with chunk data → receive OK
+   g. Report progress
+9. Build FileDescription YAML from all chunk metadata
+10. If YAML size (compressed+encoded) > threshold:
+    a. Encrypt YAML as a file
+    b. Upload encrypted YAML (single chunk) → get redirect description
+    c. Use redirect description for URL
+11. Compress + base64url encode description
+12. Display URL: https://example.com/file/#<encoded description>
+```
+
+### 5.6 Download Orchestration
+
+```
+1. Parse URL hash fragment
+2. Base64url decode + decompress → YAML
+3. Parse YAML → FileDescription
+4. Validate description (sequential chunks, sizes match)
+5. If redirect field present:
+   a. Download redirect file (single chunk)
+   b. Decrypt, validate size+digest, parse inner description
+   c. Continue with inner description
+6. For each chunk (parallel, up to 8 concurrent):
+   a. Generate ephemeral X25519 keypair
+   b. Connect to XFTP server (web handshake)
+   c. Send FGET { recipientDhPubKey } → receive (serverDhPubKey, cbNonce) + encrypted body
+   d. Compute DH shared secret
+   e. Transit-decrypt chunk body (XSalsa20-Poly1305 with DH secret)
+   f. Verify chunk digest (SHA-256)
+   g. Send FACK → receive OK
+   h. Report progress
+7. Concatenate all transit-decrypted chunks (in order) → encrypted file
+8. Verify file digest (SHA-512)
+9. File-decrypt entire stream (XSalsa20-Poly1305 with file key + nonce)
+10. Extract FileHeader → get original fileName
+11. Trigger browser download (Blob + download link, or File System Access API)
+```
+
+## 6. XFTP Server Changes
+
+### 6.1 SNI-Based Certificate Switching
+
+The SMP server already implements SNI-based certificate switching (see `Transport/Server.hs:255-269`). The same mechanism must be added to the XFTP server.
+
+**Current SMP implementation:**
+```haskell
+T.onServerNameIndication = case sniCredential of
+  Nothing -> \_ -> pure $ T.Credentials [credential]
+  Just sniCred -> \case
+    Nothing -> pure $ T.Credentials [credential]
+    Just _host -> T.Credentials [sniCred] <$ atomically (writeTVar sniCredUsed True)
+```
+
+**XFTP changes needed:**
+1. Add `httpCredentials :: Maybe T.Credential` to `XFTPServerConfig`.
+2. Add configuration section `[WEB]` to `file-server.ini` for HTTPS cert/key paths.
+3. Create `TLSServerCredential` with both XFTP and web certificates.
+4. Pass combined credentials to `runHTTP2Server` → `runTransportServerState_`.
+5. Use `sniCredUsed` flag to distinguish web vs. native clients.
+
+**Certificate setup:**
+- XFTP identity certificate: Existing self-signed CA chain (used for protocol identity via fingerprint).
+- Web certificate: Standard CA-issued TLS certificate (e.g., Let's Encrypt) for the server's FQDN.
+- Both certificates served on the same port (443).
+
+### 6.2 CORS Support
+
+Browsers enforce the same-origin policy. The web page (served from `example.com`) must make cross-origin requests to XFTP servers (`xftp1.simplex.im`, etc.).
+
+**Required server changes:**
+
+1. **Handle OPTIONS preflight requests:**
+   ```
+   OPTIONS /
+   Response headers:
+     Access-Control-Allow-Origin: *
+     Access-Control-Allow-Methods: POST, OPTIONS
+     Access-Control-Allow-Headers: Content-Type
+     Access-Control-Max-Age: 86400
+   Response body: empty
+   Response status: 200
+   ```
+
+2. 
**Add CORS headers to all POST responses (when Origin header present):** + ``` + Access-Control-Allow-Origin: * + Access-Control-Expose-Headers: * + ``` + +3. **Implementation location:** In `runHTTP2Server` handler or a wrapper around the XFTP request handler. Detect the `Origin` header → add CORS headers. This can be conditional on web mode being enabled in config. + +**Security consideration:** `Access-Control-Allow-Origin: *` is safe here because: +- All XFTP commands require Ed25519 authentication (per-chunk keys from file description). +- No cookies or browser credentials are involved. +- File content is end-to-end encrypted. + +### 6.3 Web Handshake with Server Identity Proof + +**Both SNI and web handshake are required.** They solve different problems: + +1. **SNI certificate switching** is required because browsers reject self-signed certificates. The XFTP identity certificate is self-signed (CA chain with offline root), so the server must present a standard CA-issued web certificate (e.g., Let's Encrypt) when a browser connects. SNI is how the server detects this. + +2. **Web handshake with challenge-response** is required because browsers cannot access the TLS certificate fingerprint or the TLS-unique channel binding (`sessionId`). The native client validates XFTP identity by checking the certificate chain fingerprint against the known `keyHash` and binding it to the TLS session. The browser gets none of this — it only knows TLS succeeded with some CA-issued cert. So the XFTP identity must be proven at the protocol level. + +**Standard handshake (unchanged for native clients):** +``` +1. Client → empty POST body → Server +2. Server → padded { vRange, sessionId, CertChainPubKey } → Client +3. Client → padded { version, keyHash } → Server +4. Server → empty → Client +``` + +**Web handshake (new, when SNI is detected):** +``` +1. Client → padded { challenge: 32 random bytes } → Server +2. Server → padded { vRange, sessionId, CertChainPubKey } (header block) + + extended body { fullCertChain, signature(challenge ++ sessionId) } → Client +3. Client validates: + - Certificate chain CA fingerprint matches known keyHash + - Signature over (challenge ++ sessionId) is valid under cert's public key + - This proves: server controls XFTP identity key AND is live (not replay) +4. Client → padded { version, keyHash } → Server +5. Server → empty → Client +``` + +**Detection mechanism:** The server detects web clients by the `sniCredUsed` flag (already available from the TLS layer). When SNI is detected, the server expects a challenge in the first POST body (non-empty, unlike standard handshake where it is empty). No marker byte is needed — SNI presence is the discriminator. + +**Block size note:** The XFTP block size is 16384 bytes (`Protocol.hs:65`). The XFTP identity certificate chain fits within this block. The signed challenge response is sent as an extended body (streamed after the 16384-byte header block), same mechanism as file chunk data. + +### 6.4 Protocol Version and Handshake Extension + +Current XFTP versions: v1 (initial), v2 (auth commands), v3 (blocked files). These version numbers refer to wire encoding format changes, not handshake changes. + +The XFTP handshake is binary-encoded via the `Encoding` typeclass (`Transport.hs:128-142`). Both `XFTPServerHandshake` and `XFTPClientHandshake` parsers end with `Tail _compat <- smpP`, which consumes any remaining bytes. 
This `Tail` extension field allows adding new fields to the handshake without breaking existing parsers — old clients/servers simply ignore the extra bytes. + +**No protocol version bump is needed** for the web handshake. The web handshake is detected via SNI (transport layer), and the challenge/response extension can use the existing `Tail` field. When SNI is detected: +1. Use web TLS certificate (existing SNI mechanism). +2. Expect challenge in first POST body (non-empty body = web client). +3. Include certificate proof in response extended body. +4. Add CORS headers to all responses for this connection. + +### 6.5 Serving the Static Page + +The XFTP server can optionally serve the static web page itself (similar to how SMP servers serve info pages). When a browser connects via SNI and sends a GET request (not POST), the server serves the HTML/JS/CSS bundle. + +This can be implemented identically to the SMP server's static page serving (`apps/smp-server/web/Static.hs`), using Warp to handle HTTP requests on the same TLS connection. + +Alternatively, the page is hosted on a separate web server (e.g., `files.simplex.chat`). The XFTP servers only need to handle XFTP protocol requests (POST) with CORS headers. + +## 7. Security Analysis + +### 7.1 Threat Model + +| Threat | Mitigation | Residual Risk | +|--------|-----------|---------------| +| Page substitution (malicious JS) | HTTPS, CSP, SRI; IPFS hosting with fingerprints in multiple locations | If web server is compromised and IPFS is not used, all guarantees lost. Fundamental limitation of web-based E2E crypto, mitigated by IPFS. | +| MITM between browser and XFTP server | XFTP identity verification via challenge-response handshake | Attacker can relay traffic (see §7.2) but cannot read file content due to E2E encryption. | +| File description leakage | Hash fragment (`#`) is never sent to server | If browser extension or malware reads URL bar, description is exposed. | +| Server learns file content | File encrypted client-side before upload (XSalsa20-Poly1305) | Server sees encrypted chunks only. | +| Traffic analysis | File size visible to network observers | Same as native XFTP client. | + +### 7.2 Relay Attack Analysis + +An attacker who controls the network could relay all traffic between the browser and the real XFTP server: + +1. Browser sends challenge to "attacker's server" +2. Attacker relays to real server +3. Real server signs challenge + sessionId with XFTP identity key +4. Attacker relays signed response to browser +5. Browser validates ✓ (signature is from the real server) + +However, the attacker **cannot read file content** because: +- File encryption key is in the hash fragment (never sent over network) +- Transit encryption uses DH key exchange (FGET) — attacker doesn't have server's DH private key +- The attacker can observe transfer sizes and timing, but this is already visible via traffic analysis + +The relay attack is equivalent to a passive network observer, which is the same threat model as native XFTP. 
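+
+To make the §6.3 check concrete, here is a minimal sketch of the validation the browser performs — the check a relay cannot forge without the XFTP identity key. Helper names (`sha256`, `verify`, `concat`, `bytesEqual`) follow §12.5/§12.6; full certificate chain validation is elided:
+
+```typescript
+// Sketch only: web handshake identity proof check (§6.3).
+async function verifyServerIdentity(
+  knownKeyHash: Uint8Array,   // fingerprint from the server address
+  caPublicKey: Uint8Array,    // CA key of the received certificate chain
+  signingKey: Uint8Array,     // key that signed the challenge response
+  challenge: Uint8Array,      // 32 random bytes sent by this client
+  sessionId: Uint8Array,      // from the server handshake block
+  signature: Uint8Array
+): Promise<boolean> {
+  // 1. The chain must root in the known XFTP identity
+  //    (KeyHash = SHA-256 of the certificate public key, per §12.5).
+  if (!bytesEqual(await sha256(caPublicKey), knownKeyHash)) return false;
+  // 2. The signature must cover this session's fresh challenge ++ sessionId,
+  //    so a recorded response from another session cannot be replayed.
+  return verify(signingKey, signature, concat(challenge, sessionId));
+}
+```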
+ +### 7.3 Comparison with Native Client Security + +| Property | Native Client | Web Client | +|----------|--------------|------------| +| TLS certificate validation | XFTP identity cert via fingerprint pinning | Web CA cert via browser + XFTP identity via challenge-response | +| Session binding | TLS-unique binds to XFTP identity cert | TLS-unique binds to web cert; challenge binds to XFTP identity | +| Code integrity | Binary signed/distributed via app stores | Served over HTTPS; SRI for subresources; IPFS hosting option; vulnerable to server compromise | +| File encryption | XSalsa20-Poly1305 | Same | +| Transit encryption | DH + XSalsa20-Poly1305 | Same | + +### 7.4 Layman Security Summary (Displayed on Page) + +The web page should display a brief, non-technical security summary explaining to users: +- Files are encrypted in the browser before upload — the server never sees file contents. +- The file link (URL) contains the decryption key in the hash fragment, which the browser never sends to any server. +- Only someone with the exact link can download and decrypt the file. +- The main risk is if the web page itself is tampered with (page substitution attack). IPFS hosting mitigates this. +- For maximum security, use the SimpleX app instead. + +## 8. Implementation Approach Discussion + +### 8.1 Option 1: Haskell to WASM + +**Verdict: Not practical.** + +- Template Haskell is used extensively (`Data.Aeson.TH`, `deriveJSON`) — incompatible with GHC WASM backend. +- Deep dependencies on STM, IORef, SQLite (for agent) — would need extensive modification. +- GHC WASM backend is experimental, large binary output (~10+ MB). +- Hard to debug in browser context. + +### 8.2 Option 2: TypeScript Reimplementation (Recommended) + +**Verdict: Best approach.** + +- Well-understood, readable, auditable by the community. +- Rich crypto ecosystem (libsodium.js provides all needed NaCl primitives as WASM). +- Direct access to browser APIs (fetch, File, ReadableStream, Blob). +- Testable in Node.js against Haskell XFTP server. +- Small bundle size (~200 KB with libsodium WASM). + +**Risk:** Exact byte-level wire compatibility requires careful encoding implementation and thorough testing against the Haskell server. + +### 8.3 Option 3: C to WASM + +**Verdict: Viable but unnecessary.** + +- Could use libsodium C code directly for crypto (faster, reference implementation). +- But protocol encoding + YAML + orchestration still needs a higher-level language. +- Emscripten toolchain adds build complexity. +- In practice, libsodium.js already IS C-to-WASM, so Option 2 gets this benefit. + +### 8.4 Option 4: Hybrid (TypeScript + C/WASM crypto) + +**Verdict: This IS Option 2**, since libsodium.js is WASM-compiled C. The TypeScript code calls into WASM for crypto, implements protocol/transport/orchestration in TypeScript. + +## 9. Implementation Plan + +### Phase 1: TypeScript XFTP Building Blocks — DONE + +**Goal:** All per-function building blocks implemented and tested via Haskell-driven unit tests. + +**Completed** (164 tests passing across 16 test groups): +1. Binary encoding (protocol/encoding.ts) — 23 tests +2. Crypto: secretbox, keys, file, padding, digest (crypto/*.ts) — 72 tests +3. Protocol: commands, transmission (protocol/commands.ts, transmission.ts) — 40 tests +4. Handshake encoding/decoding (protocol/handshake.ts) — 18 tests +5. Identity proof verification (crypto/identity.ts) — 15 tests +6. File descriptions: types, YAML, validation (protocol/description.ts) — 13 tests +7. 
Chunk sizing: prepareChunkSizes, singleChunkSize, etc. (protocol/chunks.ts) — 4 tests +8. Transport crypto: cbAuthenticate/cbVerify, transit encrypt/decrypt (protocol/client.ts) — 10 tests +9. Server address parsing (protocol/address.ts) — 3 tests +10. Download helpers: DH, transit-decrypt, file-decrypt (download.ts) — 11 tests + +### Phase 2: XFTP Server Changes — DONE + +**Goal:** XFTP servers support web client connections. + +**Completed** (7 Haskell integration tests passing): +1. SNI certificate switching — `TLSServerCredential` mechanism for XFTP +2. CORS headers — OPTIONS handler + CORS response headers +3. Web handshake — challenge-response identity proof (Ed25519 + Ed448) +4. Integration tests — Ed25519 and Ed448 web handshake round-trips + +### Phase 3: HTTP/2 Client + Agent Orchestration + +**Goal:** Complete XFTP client that can upload and download files against a real Haskell XFTP server. + +1. **`client.ts`** ← `Simplex.FileTransfer.Client` — HTTP/2 client via `fetch()` / `node:http2`: connect + handshake, sendCommand, createChunk, uploadChunk, downloadChunk, deleteChunk, ackChunk, ping. +2. **`agent.ts`** ← `Simplex.FileTransfer.Client.Main` — Upload orchestration (encrypt → chunk → register → upload → build description), download orchestration (parse → download → verify → decrypt → ack), URL encoding with DEFLATE compression (§4.1). + +### Phase 4: Integration Testing + +**Goal:** Prove the TypeScript client is wire-compatible with the Haskell server. + +1. **Test harness** — Haskell-driven tests in `XFTPWebTests.hs` (same pattern as per-function tests). +2. **Upload test** — TypeScript uploads file → Haskell client downloads it → verify contents match. +3. **Download test** — Haskell client uploads file → TypeScript downloads it → verify contents match. +4. **Round-trip test** — TypeScript upload → TypeScript download → verify. +5. **Edge cases** — Single chunk, many chunks, exactly-sized chunks, redirect descriptions. + +### Phase 5: Web Page + +**Goal:** Static HTML page with upload/download UX. + +1. **Bundle TypeScript** — Compile to ES module bundle with libsodium.js WASM included. +2. **Upload UI** — Drag-drop zone, file picker, progress circle, link display. +3. **Download UI** — Parse URL, show file info, download button, progress circle. +4. **App install CTA** — Banner/messaging promoting SimpleX app for larger files. + +### Phase 6: Server-Hosted Page (Optional) + +**Goal:** XFTP servers can optionally serve the web page themselves. + +1. **Static file serving** — Similar to SMP server's `attachStaticFiles`. +2. **GET handler** — When web client sends HTTP GET (not POST), serve HTML page. +3. **Page generation** — Embed page bundle at server build time. + +## 10. Testing Strategy + +### 10.1 Per-Function Unit Tests (Haskell-driven) + +**Haskell is the test driver.** For each TypeScript function, there is one Haskell test case that: +1. Calls the Haskell function with known (or random) input → gets expected output. +2. Calls the same-named TypeScript function via `node` → gets actual output. +3. Asserts byte-identical results. + +This means **zero special test code on the TypeScript side** — node just `require`s the production module and calls the exported function. The Haskell test file is pure boilerplate. 
+ +**Haskell helper** (defined once in the test file): +```haskell +callTS :: FilePath -> String -> ByteString -> IO ByteString +callTS modulePath funcName inputHex = do + let script = "const m = require('./" <> modulePath <> "'); " + <> "process.stdout.write(m." <> funcName + <> "(Buffer.from('" <> B.unpack (Base16.encode inputHex) <> "', 'hex')))" + (_, Just hout, _, ph) <- createProcess (proc "node" ["-e", script]) + {std_out = CreatePipe, cwd = Just xftpWebDir} + out <- B.hGetContents hout + void $ waitForProcess ph + pure out +``` + +**Example test cases:** +```haskell +describe "protocol/encoding" $ do + it "encodeWord16" $ do + let expected = smpEncode (42 :: Word16) + actual <- callTS "src/protocol/encoding" "encodeWord16" (smpEncode (42 :: Word16)) + actual `shouldBe` expected + +describe "crypto/secretbox" $ do + it "sbEncryptTailTag" $ do + let Right expected = LC.sbEncryptTailTag testKey testNonce testData testLen testPadLen + actual <- callTS "src/crypto/secretbox" "sbEncryptTailTag" + (smpEncode testKey <> smpEncode testNonce <> testData <> smpEncode testLen <> smpEncode testPadLen) + actual `shouldBe` LB.toStrict expected + it "sbEncryptTailTag round-trip" $ do + let Right ct = LC.sbEncryptTailTag testKey testNonce testData testLen testPadLen + actual <- callTS "src/crypto/secretbox" "sbDecryptTailTag" + (smpEncode testKey <> smpEncode testNonce <> smpEncode testPadLen <> LB.toStrict ct) + actual `shouldBe` LB.toStrict testData + +describe "crypto/padding" $ do + it "pad" $ do + let Right expected = C.pad testMsg 16384 + actual <- callTS "src/crypto/padding" "pad" (encodeTestArgs testMsg (16384 :: Int)) + actual `shouldBe` expected +``` + +**Each row in §12.1–12.17 function mapping tables becomes a test case.** The tables serve as the test case list. + +**Development workflow:** Implement one TS function → run its Haskell test → fix until it passes → move to next function. Bottom-up confidence building. No guessing what's broken. + +**Test execution:** Tests live in `tests/XFTPWebTests.hs` in the simplexmq repo, skipped by default (require compiled TS project path). Run with: +```bash +cabal test --ghc-options -O0 --test-option=--match="/XFTP Web Client/" +``` + +**Random inputs:** Haskell tests can use QuickCheck to generate random inputs each run, not just hardcoded values. This catches edge cases that fixed test vectors miss. + +### 10.2 Integration Tests (TS-driven, spawns Haskell server) + +**Only attempted after all per-function tests (§10.1) pass.** These are end-to-end tests that verify the full upload/download pipeline works against a real XFTP server. + +**Approach:** Node.js test (`xftp-web/test/integration.test.ts`) spawns `xftp-server` and `xftp` CLI as subprocesses. + +``` +┌────────────────────────────────────────────────────────────────┐ +│ Node.js test process (integration.test.ts) │ +│ │ +│ 1. Spawn xftp-server subprocess │ +│ 2. Run TypeScript XFTP client (under test) ──── HTTP/2 ────┐ │ +│ 3. Spawn xftp CLI to download/verify │ │ │ +│ │ │ │ +│ ┌──────────────────────┐ ┌─────────────────▼──────────┐ │ │ +│ │ xftp CLI (Haskell) │ │ xftp-server (Haskell) │ │ │ +│ │ (verify/upload) │◄───│ (subprocess) │ │ │ +│ └──────────────────────┘ └────────────────────────────┘ │ │ +└────────────────────────────────────────────────────────────────┘ +``` + +**Test scenarios:** +1. TypeScript uploads → Haskell `xftp` CLI downloads → content verified. +2. Haskell `xftp` CLI uploads → TypeScript downloads → content verified. +3. TypeScript upload + download round-trip. +4. 
Web handshake with challenge-response validation. +5. Redirect descriptions (large file → compressed description upload). +6. Multiple chunks across multiple servers. +7. Error cases: expired file, auth failure, digest mismatch. + +### 10.3 Browser Tests + +- Manual testing in Chrome, Firefox, Safari. +- Automated via Playwright or Puppeteer (optional, for CI). +- Focus on: streaming upload/download, progress reporting, URL parsing, CORS. + +### 10.4 Test Ordering (Bottom-Up) + +The per-function tests (§10.1) must pass before attempting integration tests (§10.2). Implementation and testing order: + +1. **Encoding primitives** — `encodeWord16`, `encodeBytes`, `encodeLarge`, `pad`, `unPad` (§12.1, §12.7) +2. **Crypto primitives** — `sha256`, `sha512`, `sign`, `verify`, `dh`, key generation (§12.5, §12.6) +3. **Streaming crypto** — `sbInit`, `sbEncryptChunk`, `sbDecryptChunk`, `sbAuth` (§12.4) +4. **File crypto** — `padLazy`, `unPadLazy` (§12.7), then `encryptFile`, `decryptChunks` (§12.8 — uses streaming crypto from step 3, not padLazy) +5. **Protocol encoding** — command/response encoding, transmission framing (§12.2, §12.3) +6. **Handshake** — handshake type encoding/decoding (§12.9) +7. **Description** — YAML serialization, validation (§12.12–§12.14) +8. **Chunk sizing** — `prepareChunkSizes`, `getChunkDigest` (§12.11) +9. **Transport client** — `sendCommand`, `createChunk`, `uploadChunk`, `downloadChunk` (§12.10) +10. **Integration** — full upload/download round-trips (§10.2) + +## 11. Resolved Design Decisions + +### 11.1 Block Size + +The XFTP block size is 16384 bytes (`Protocol.hs:65`). The XFTP identity certificate chain fits within a single block. The signed challenge response for web handshake is sent as an extended body after the header block. + +### 11.2 Streaming Encryption Compatibility + +**The Haskell streaming XSalsa20-Poly1305 is NOT compatible with standard NaCl `crypto_secretbox`.** Analysis of `Crypto/Lazy.hs` confirms: + +- `SbState` (line 196) is `(XSalsa.State, Poly1305.State)` — explicit state pair. +- `sbInit` (line 202) generates a 32-byte Poly1305 key from the first XSalsa20 keystream block, then initializes both states. +- `sbEncryptChunk` (line 229) XORs plaintext with keystream and updates Poly1305 with the ciphertext. +- `sbAuth` (line 241) finalizes Poly1305 → 16-byte auth tag. +- **Auth tag is appended at the END** for files (`sbEncryptTailTag`, line 134), unlike standard NaCl which prepends it. +- Standard `crypto_secretbox` produces `tag ++ ciphertext`; this produces `ciphertext ++ tag`. + +The TypeScript implementation must reimplement the exact streaming logic using libsodium's low-level XSalsa20 and Poly1305 APIs. `crypto_secretbox_easy` cannot be used. + +### 11.3 Web Client Detection + +Both SNI and web handshake are mandatory (see §6.3). SNI detection (`sniCredUsed` flag) is the discriminator — when SNI is detected, the server expects the web handshake variant. + +### 11.4 URL Compression + +DEFLATE (raw, no gzip/zlib wrapper). Available in modern browsers via `DecompressionStream`. Modern browsers only — no polyfill needed. + +### 11.5 Testing Architecture + +Two levels: (1) Haskell-driven per-function tests (`tests/XFTPWebTests.hs`) that call each TS function via `node` and compare output with the Haskell equivalent — zero TS test code needed, see §10.1. 
(2) TS-driven integration tests (`xftp-web/test/integration.test.ts`) that spawn `xftp-server` and `xftp` CLI as subprocesses for full round-trip verification — only attempted after all per-function tests pass, see §10.2.
+
+### 11.6 Memory Management for 100 MB Files
+
+XSalsa20-Poly1305 streaming encryption/decryption is sequential — each 64KB block's state depends on the previous block, and the auth tag is computed/verified at the end. This means both upload and download have the same structure: one sequential crypto pass + one parallel network pass.
+
+**Upload flow:**
+1. `File.stream()` → encrypt sequentially (state threading) → buffer encrypted output
+2. Compute SHA-512 digest of encrypted data
+3. Split into chunks, upload in parallel (up to 8 concurrent, see §11.9) to randomly selected servers (from the 6 default servers in `Presets.hs`)
+
+**Download flow:**
+1. Download chunks in parallel from servers → buffer encrypted data
+2. Decrypt sequentially (state threading) → verify auth tag
+3. Trigger browser save
+
+Both directions buffer ~100 MB of encrypted data. The approach should be symmetric.
+
+**Option A — Memory buffer:** Buffer encrypted data as `ArrayBuffer`. 100 MB peak memory is feasible on modern devices. Simple implementation, no Web Worker needed. Chunk slicing is zero-copy via `Uint8Array.subarray()`.
+
+**Option B — OPFS ([Origin Private File System](https://developer.mozilla.org/en-US/docs/Web/API/File_System_API/Origin_private_file_system)):** Write encrypted data to OPFS instead of holding it in memory. OPFS storage quota is shared with IndexedDB/Cache API — typically hundreds of MB to several GB ([quota details](https://developer.mozilla.org/en-US/docs/Web/API/Storage_API/Storage_quotas_and_eviction_criteria)). The fast synchronous API (`createSyncAccessHandle()`) requires a [Web Worker](https://developer.mozilla.org/en-US/docs/Web/API/FileSystemFileHandle/createSyncAccessHandle) but is [3-4x faster than IndexedDB](https://web.dev/articles/origin-private-file-system). The async API (`createWritable()`) works on the main thread.
+
+**Decision:** Use OPFS with a Web Worker. While 100 MB fits in memory, OPFS future-proofs the implementation for raising the file size limit (250 MB, 500 MB, etc.) without code changes. The Web Worker also keeps the main thread responsive during encryption/decryption. The implementation cost is modest — a single worker that runs the sequential crypto pipeline, reading/writing OPFS files.
+
+### 11.7 Server Page Hosting
+
+Excluded from the initial implementation; added at the very end (Phase 6) as an optional feature. Initial deployment serves the page from a separate web host.
+
+### 11.8 File Expiry Communication
+
+Hardcode 48 hours for the standalone web page. A server-hosted page can use the server-configurable TTL. The page should also display which XFTP servers were used for the upload.
+
+### 11.9 Concurrent Operations
+
+8 parallel operations in the browser. The Haskell CLI uses 16, but browsers have per-origin connection limits (6-8). Since chunks typically go to different servers (different origins), 8 provides good parallelism without hitting browser limits.
+
+## 12. Haskell-to-TypeScript Function Mapping
+
+This section maps every TypeScript module to the Haskell functions it must reimplement. File paths are relative to `src/`. Line numbers reference the current codebase. Each TypeScript function must produce byte-identical output to its Haskell counterpart — this is transpilation, not reimplementation.
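+
+As an example of the byte-level fidelity required, a sketch of two of the subtler mappings from §12.1 — signatures are assumptions; the production functions may differ:
+
+```typescript
+// Sketch only: Int64 is encoded as two big-endian Word32s (high, then low),
+// and Maybe values use a '0'/'1' marker byte (§12.1).
+function encodeInt64(n: bigint): Uint8Array {
+  const out = new Uint8Array(8);
+  const v = new DataView(out.buffer);
+  v.setUint32(0, Number((n >> 32n) & 0xffffffffn)); // high Word32
+  v.setUint32(4, Number(n & 0xffffffffn));          // low Word32
+  return out;
+}
+
+function encodeMaybe<T>(enc: (x: T) => Uint8Array, v: T | undefined): Uint8Array {
+  if (v === undefined) return Uint8Array.of(0x30); // '0' = Nothing
+  const e = enc(v);
+  const out = new Uint8Array(1 + e.length);
+  out[0] = 0x31; // '1' = Just, followed by the encoded value
+  out.set(e, 1);
+  return out;
+}
+```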
+ +### 12.1 `protocol/encoding.ts` ← `Simplex/Messaging/Encoding.hs` + +Binary encoding primitives. Every XFTP type's wire format is built from these. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encodeWord16(n)` | `smpEncode :: Word16` | 70 | 2-byte big-endian | +| `decodeWord16(buf)` | `smpP :: Word16` | 70 | Parse 2-byte big-endian | +| `encodeWord32(n)` | `smpEncode :: Word32` | 76 | 4-byte big-endian | +| `decodeWord32(buf)` | `smpP :: Word32` | 76 | Parse 4-byte big-endian | +| `encodeInt64(n)` | `smpEncode :: Int64` | 82 | Two Word32s (high, low) | +| `decodeInt64(buf)` | `smpP :: Int64` | 82 | Parse two Word32s | +| `encodeBytes(bs)` | `smpEncode :: ByteString` | 100 | 1-byte length prefix + bytes | +| `decodeBytes(buf)` | `smpP :: ByteString` | 100 | Parse 1-byte length prefix | +| `encodeLarge(bs)` | `smpEncode :: Large` | 133 | 2-byte length prefix + bytes | +| `decodeLarge(buf)` | `smpP :: Large` | 133 | Parse 2-byte length prefix | +| `encodeTail(bs)` | `smpEncode :: Tail` | 124 | Raw bytes (no prefix) | +| `decodeTail(buf)` | `smpP :: Tail` | 124 | Take all remaining bytes | +| `encodeBool(b)` | `smpEncode :: Bool` | 58 | `'T'` or `'F'` | +| `decodeBool(buf)` | `smpP :: Bool` | 58 | Parse `'T'`/`'F'` | +| `encodeString(s)` | `smpEncode :: String` | 159 | Via ByteString encoding | +| `encodeMaybe(enc, v)` | `smpEncode :: Maybe a` | 114 | `'0'` for Nothing, `'1'` + value for Just | +| `decodeMaybe(dec, buf)` | `smpP :: Maybe a` | 114 | Parse optional value | +| `encodeNonEmpty(enc, xs)` | `smpEncode :: NonEmpty a` | 165 | 1-byte length + elements | +| `decodeNonEmpty(dec, buf)` | `smpP :: NonEmpty a` | 165 | Parse length-prefixed list | + +**Tuple encoding:** Tuples are encoded by concatenating encoded fields. Decoded by parsing fields sequentially. Instances at lines 172-212. + +### 12.2 `protocol/commands.ts` ← `Simplex/FileTransfer/Protocol.hs` + +XFTP commands and their wire encoding. + +| TypeScript type/function | Haskell type/function | Line | Description | +|---|---|---|---| +| `FileInfo` | `FileInfo` | 174 | `{sndKey, size :: Word32, digest :: ByteString}` | +| `encodeFNEW(info, rcvKeys, auth)` | `FNEW` encoding | 183 | `smpEncode (FNEW_)` + fields | +| `encodeFADD(rcvKeys)` | `FADD` encoding | 183 | Add recipient keys | +| `encodeFPUT()` | `FPUT` encoding | 183 | Upload marker (no fields) | +| `encodeFDEL()` | `FDEL` encoding | 183 | Delete marker | +| `encodeFGET(dhPubKey)` | `FGET` encoding | 183 | Download with DH key | +| `encodeFACK()` | `FACK` encoding | 183 | Acknowledge marker | +| `encodePING()` | `PING` encoding | 183 | Ping marker | +| `decodeFRSndIds(buf)` | `FRSndIds` parser | 285 | `(SenderId, NonEmpty RecipientId)` | +| `decodeFRRcvIds(buf)` | `FRRcvIds` parser | 285 | `NonEmpty RecipientId` | +| `decodeFRFile(buf)` | `FRFile` parser | 285 | `(RcvPublicDhKey, CbNonce)` | +| `decodeFROk()` | `FROk` parser | 285 | Success | +| `decodeFRErr(buf)` | `FRErr` parser | 285 | Error type | +| `decodeFRPong()` | `FRPong` parser | 285 | Pong | +| `XFTPErrorType` | `XFTPErrorType` | 206 | Error enumeration (Transport.hs) | + +**Command tags** (`FileCommandTag`, line 103): Each command is prefixed by its tag string (`"FNEW"`, `"FADD"`, etc.) encoded via `smpEncode`. + +### 12.3 `protocol/transmission.ts` ← `Simplex/FileTransfer/Protocol.hs` + +Transmission framing: sign, encode, pad to block size. 
+
+| TypeScript function | Haskell function | Line | Description |
+|---|---|---|---|
+| `xftpEncodeAuthTransmission(key, ...)` | `xftpEncodeAuthTransmission` | 340 | Sign + encode + pad to 16384 |
+| `xftpDecodeTransmission(buf)` | `xftpDecodeTransmission` | 360 | Parse padded response block |
+| `xftpBlockSize` | `xftpBlockSize` | 65 | `16384` constant |
+
+**Wire format:** `<signature> <corrId> <entityId> <command>` padded with `#` to 16384 bytes. Signature is Ed25519 over `(sessionId ++ corrId ++ entityId ++ encodedCommand)`.
+
+**Padding:** Uses `Crypto.pad` (`Crypto.hs:1077`) — 2-byte big-endian length prefix + message + `#` (0x23) fill.
+
+### 12.4 `crypto/secretbox.ts` ← `Simplex/Messaging/Crypto.hs` + `Simplex/Messaging/Crypto/Lazy.hs`
+
+Streaming XSalsa20-Poly1305 encryption/decryption.
+
+| TypeScript function | Haskell function | File | Line | Description |
+|---|---|---|---|---|
+| `sbInit(key, nonce)` | `sbInit` | Crypto/Lazy.hs | 202 | Init `(XSalsa.State, Poly1305.State)` |
+| `cbInit(dhSecret, nonce)` | `cbInit` | Crypto/Lazy.hs | 198 | Init from DH secret (transit) |
+| `sbEncryptChunk(state, chunk)` | `sbEncryptChunk` | Crypto/Lazy.hs | 229 | XOR + Poly1305 update → `(ciphertext, newState)` |
+| `sbDecryptChunk(state, chunk)` | `sbDecryptChunk` | Crypto/Lazy.hs | 235 | XOR + Poly1305 update → `(plaintext, newState)` |
+| `sbAuth(state)` | `sbAuth` | Crypto/Lazy.hs | 241 | Finalize → 16-byte auth tag |
+| `sbEncryptTailTag(key, nonce, data, len, padLen)` | `sbEncryptTailTag` | Crypto/Lazy.hs | 134 | Full encrypt, tag appended |
+| `sbDecryptTailTag(key, nonce, paddedLen, data)` | `sbDecryptTailTag` | Crypto/Lazy.hs | 153 | Full decrypt, verify appended tag |
+| `cryptoBox(key, iv, msg)` | `cryptoBox` | Crypto.hs | 1313 | XSalsa20 + Poly1305 (tag prepended) |
+| `cbEncrypt(dhSecret, nonce, msg, padLen)` | `cbEncrypt` | Crypto.hs | 1286 | Crypto box with DH secret |
+| `cbDecrypt(dhSecret, nonce, msg)` | `cbDecrypt` | Crypto.hs | 1320 | Crypto box decrypt |
+
+**Note:** `cryptoBox`, `cbEncrypt`, and `cbDecrypt` are included for completeness but are **not used by the web XFTP client**. They implement single-shot crypto_box (tag prepended) used for SMP protocol messages. The web client only needs `cbInit` (for transit decryption) and the streaming functions (`sbEncryptChunk`, `sbDecryptChunk`, `sbAuth`, `sbEncryptTailTag`, `sbDecryptTailTag`).
+
+**Internal init (`sbInit_`)** at `Crypto/Lazy.hs:210`:
+1. Call `xSalsa20(key, nonce, zeroes_32)` → `(poly1305Key, xsalsaState)`
+2. Initialize Poly1305 with `poly1305Key`
+3. Return `(xsalsaState, poly1305State)`
+
+The `xSalsa20` function (`Crypto.hs:1467`) uses: `initialize 20 secret (zero8 ++ iv0)`, then `derive state0 iv1`, then `generate state1 32` for keystream, `combine state2 msg` for encryption.
+
+### 12.5 `crypto/keys.ts` ← `Simplex/Messaging/Crypto.hs`
+
+Key generation, signing, DH.
+
+ +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `generateEd25519KeyPair()` | `generateAuthKeyPair` | 726 | Ed25519 keypair from CSPRNG | +| `generateX25519KeyPair()` | via `generateKeyPair` | — | X25519 keypair for DH | +| `sign(privateKey, msg)` | `sign'` | 1175 | Ed25519 signature (64 bytes) | +| `verify(publicKey, sig, msg)` | `verify'` | 1270 | Ed25519 verification | +| `dh(pubKey, privKey)` | `dh'` | 1280 | X25519 DH → shared secret | + +**Key types:** +- `SbKey` (`Crypto.hs:1411`): 32-byte symmetric key (newtype over ByteString) +- `CbNonce` (`Crypto.hs:1368`): 24-byte nonce (newtype over ByteString) +- `KeyHash` (`Crypto.hs:981`): SHA-256 of certificate public key + +### 12.6 `crypto/digest.ts` ← `Simplex/Messaging/Crypto.hs` + +Hash functions. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `sha256(data)` | `sha256Hash` | 1006 | SHA-256 digest (32 bytes) | +| `sha512(data)` | `sha512Hash` | 1011 | SHA-512 digest (64 bytes) | + +### 12.7 `crypto/padding.ts` ← `Simplex/Messaging/Crypto.hs` + `Simplex/Messaging/Crypto/Lazy.hs` + +Block padding used for protocol messages and file encryption. + +| TypeScript function | Haskell function | File | Line | Description | +|---|---|---|---|---| +| `pad(msg, blockSize)` | `pad` | Crypto.hs | 1077 | 2-byte BE length + msg + `#` fill | +| `unPad(buf)` | `unPad` | Crypto.hs | 1085 | Extract msg from padded block | +| `padLazy(msg, msgLen, padLen)` | `pad` | Crypto/Lazy.hs | 70 | 8-byte Int64 length + msg + `#` fill | +| `unPadLazy(buf)` | `unPad` | Crypto/Lazy.hs | 91 | Extract msg from lazy-padded block | + +**Strict pad format (protocol messages):** `[2-byte BE length][message][# # # ...]` +**Lazy pad format (file encryption):** `[8-byte Int64 length][message][# # # ...]` + +### 12.8 `crypto/file.ts` ← `Simplex/FileTransfer/Crypto.hs` + +File-level encryption/decryption orchestrating the streaming primitives. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encryptFile(source, header, key, nonce, fileSize, padSize, dest)` | `encryptFile` | 30 | Stream-encrypt file with header, 64KB chunks, appended auth tag | +| `decryptChunks(paddedSize, chunks, key, nonce)` | `decryptChunks` | 57 | Decrypt concatenated chunks, verify auth tag, extract header | +| `readChunks(paths)` | `readChunks` | 113 | Concatenate chunk files | + +**`encryptFile` algorithm** (lines 30-42): +1. Init state: `sbInit(key, nonce)` +2. Encrypt header: `sbEncryptChunk(state, smpEncode(fileSize') <> headerBytes)` — `fileSize'` = headerLen + originalFileSize; `smpEncode(fileSize')` produces the 8-byte Int64 length prefix, which is concatenated with `headerBytes` and encrypted together as one piece +3. Encrypt file data in 65536-byte chunks: `sbEncryptChunk(state, chunk)` → thread state through each chunk +4. Encrypt padding in 65536-byte chunks: same chunked loop as step 3 using `'#'` fill. `padLen = encSize - authTagSize - fileSize' - 8` +5. Finalize: `sbAuth(state)` → append 16-byte auth tag + +Note: `encryptFile` does NOT use `padLazy` or `sbEncryptTailTag`. It manually prepends the length, encrypts header+data+padding as separate chunk sequences, and appends the auth tag. The `sbEncryptTailTag` function (which does use `padLazy`) is used elsewhere but not by `encryptFile`. + +**`decryptChunks` algorithm** (lines 57-111) — two paths: + +**Single chunk (one file, line 60):** Calls `sbDecryptTailTag(key, nonce, encSize - authTagSize, data)` directly. 
This internally decrypts, verifies the auth tag, and strips the 8-byte length prefix + padding via `unPad`. Returns `(authOk, content)`. Then parses `FileHeader` from content.
+
+**Multi-chunk (line 67):**
+1. `sbInit(key, nonce)` → init state
+2. Decrypt first chunk file: `sbDecryptChunkLazy(state, chunk)` → `splitLen` extracts 8-byte `expectedLen` → parse `FileHeader`
+3. Decrypt middle chunk files: `sbDecryptChunkLazy(state, chunk)` loop, write to output, accumulate `len`
+4. Decrypt last chunk file: split off last 16 bytes as auth tag → `sbDecryptChunkLazy(state, remaining)` → truncate padding using `expectedLen` vs accumulated `len` → verify `sbAuth(finalState) == authTag`
+
+**`FileHeader`** (`Types.hs:35`): `{fileName :: String, fileExtra :: Maybe String}`, parsed via `smpP`.
+
+### 12.9 `protocol/handshake.ts` ← `Simplex/FileTransfer/Transport.hs`
+
+XFTP handshake types and encoding.
+
+| TypeScript type/function | Haskell type/function | Line | Description |
+|---|---|---|---|
+| `XFTPServerHandshake` | `XFTPServerHandshake` | 114 | `{xftpVersionRange, sessionId, authPubKey}` |
+| `encodeServerHandshake(hs)` | `smpEncode :: XFTPServerHandshake` | 136 | Binary encode |
+| `decodeServerHandshake(buf)` | `smpP :: XFTPServerHandshake` | 136 | Parse with `Tail _compat` (line 142) |
+| `XFTPClientHandshake` | `XFTPClientHandshake` | 121 | `{xftpVersion, keyHash}` |
+| `encodeClientHandshake(hs)` | `smpEncode :: XFTPClientHandshake` | 128 | Binary encode |
+| `decodeClientHandshake(buf)` | `smpP :: XFTPClientHandshake` | 128 | Parse with `Tail _compat` (line 133) |
+| `XFTP_VERSION_RANGE` | `supportedFileServerVRange` | 101 | Version 1..3 |
+| `CURRENT_XFTP_VERSION` | `currentXFTPVersion` | 98 | Version 3 |
+
+### 12.10 `protocol/client.ts` ← `Simplex/FileTransfer/Client.hs` (crypto primitives) — DONE
+
+Transport-level crypto for command authentication and chunk encryption/decryption.
+
+| TypeScript function | Haskell function | Description | Status |
+|---|---|---|---|
+| `cbAuthenticate(peerPub, ownPriv, nonce, msg)` | `C.cbAuthenticate` | 80-byte crypto_box authenticator | ✓ |
+| `cbVerify(peerPub, ownPriv, nonce, auth, msg)` | `C.cbVerify` | Verify authenticator | ✓ |
+| `encryptTransportChunk(dhSecret, nonce, plain)` | `sendEncFile` | Encrypt chunk (tag appended) | ✓ |
+| `decryptTransportChunk(dhSecret, nonce, enc)` | `receiveEncFile` | Decrypt chunk (tag verified) | ✓ |
+
+### 12.11 `protocol/chunks.ts` ← `Simplex/FileTransfer/Chunks.hs` + `Client.hs` — DONE
+
+Chunk size selection and file splitting.
+
+| TypeScript function/constant | Haskell equivalent | Status |
+|---|---|---|
+| `chunkSize0..3` | `chunkSize0..3` (Chunks.hs) | ✓ |
+| `serverChunkSizes` | `serverChunkSizes` | ✓ |
+| `prepareChunkSizes(size)` | `prepareChunkSizes` (Client.hs:322) | ✓ |
+| `singleChunkSize(size)` | `singleChunkSize` (Client.hs:316) | ✓ |
+| `prepareChunkSpecs(sizes)` | `prepareChunkSpecs` (Client.hs:339) | ✓ |
+| `getChunkDigest(chunk)` | `getChunkDigest` (Client.hs:347) | ✓ |
+
+### 12.12–12.14 `protocol/description.ts` ← `Simplex/FileTransfer/Description.hs` — DONE
+
+Types, YAML encode/decode, base64url, FileSize, replica grouping/folding, validation — all in one file.
+
+ +| TypeScript function/type | Haskell equivalent | Status | +|---|---|---| +| `FileDescription`, `FileChunk`, `FileChunkReplica`, `RedirectFileInfo` | Matching record types | ✓ | +| `base64urlEncode/Decode` | `strEncode`/`strDecode` for `ByteString` | ✓ | +| `encodeFileSize/decodeFileSize` | `StrEncoding (FileSize a)` | ✓ | +| `encodeFileDescription(fd)` | `encodeFileDescription` (line 230) | ✓ | +| `decodeFileDescription(yaml)` | `decodeFileDescription` (line 356) | ✓ | +| `validateFileDescription(fd)` | `validateFileDescription` (line 221) | ✓ | +| `fdSeparator` | `fdSeparator` (line 111) | ✓ | +| Internal: `unfoldChunksToReplicas`, `foldReplicasToChunks`, `encodeFileReplicas` | Matching functions | ✓ | + +### 12.15 `client.ts` ← `Simplex/FileTransfer/Client.hs` (HTTP/2 operations) + +HTTP/2 XFTP client using `node:http2` (Node.js) or `fetch()` (browser). Transpilation of `Client.hs` network operations. + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `connectXFTP(server, config)` | `getXFTPClient` | 111 | HTTP/2 connect + handshake → XFTPClient state | +| `sendXFTPCommand(client, key, fileId, cmd, chunk?)` | `sendXFTPCommand` | 200 | Encode auth transmission + POST + parse response | +| `createXFTPChunk(client, spKey, info, rcvKeys, auth?)` | `createXFTPChunk` | 232 | FNEW → (SenderId, RecipientId[]) | +| `addXFTPRecipients(client, spKey, fileId, rcvKeys)` | `addXFTPRecipients` | 244 | FADD → RecipientId[] | +| `uploadXFTPChunk(client, spKey, fileId, chunkData)` | `uploadXFTPChunk` | 250 | FPUT with streaming body | +| `downloadXFTPChunk(client, rpKey, fileId, chunkSize)` | `downloadXFTPChunk` | 254 | FGET → DH → transit-decrypt → Uint8Array | +| `deleteXFTPChunk(client, spKey, senderId)` | `deleteXFTPChunk` | 286 | FDEL | +| `ackXFTPChunk(client, rpKey, recipientId)` | `ackXFTPChunk` | 289 | FACK | +| `pingXFTP(client)` | `pingXFTP` | 292 | PING → FRPong | + +**XFTPClient state** (returned by `connectXFTP`): +- HTTP/2 session (node: `ClientHttp2Session`, browser: base URL for fetch) +- `thParams`: `{sessionId, blockSize, thVersion, thAuth}` from handshake +- Server address for reconnection + +**sendXFTPCommand wire format:** +1. `xftpEncodeAuthTransmission(thParams, pKey, (corrId, fId, cmd))` → padded 16KB block +2. POST to "/" with body = block + optional chunk data (streaming) +3. Response: read 16KB `bodyHead`, decode via `xftpDecodeTClient` +4. For FGET: response also has streaming body (encrypted chunk) + +### 12.16 `agent.ts` ← `Simplex/FileTransfer/Client/Main.hs` + +Upload/download orchestration and URL encoding. Combines what the RFC originally split across `agent/upload.ts`, `agent/download.ts`, and `description/uri.ts`. + +**Upload functions:** + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `encryptFileForUpload(file, fileName)` | `encryptFileForUpload` | 264 | key/nonce → encrypt → digest → chunk specs | +| `uploadFile(client, chunkSpecs, servers, numRcps)` | `uploadFile` | 285 | Parallel upload (up to 16 concurrent) | +| `uploadFileChunk(client, chunkNo, spec, server)` | `uploadFileChunk` | 301 | FNEW + FPUT for one chunk | +| `createRcvFileDescriptions(fd, sentChunks)` | `createRcvFileDescriptions` | 329 | Build per-recipient descriptions | +| `createSndFileDescription(fd, sentChunks)` | `createSndFileDescription` | 361 | Build sender (deletion) description | + +**Upload call sequence** (`cliSendFileOpts`, line 243): +1. 
`encryptFileForUpload` — `randomSbKey` + `randomCbNonce` → `encryptFile` → `sha512Hash` digest → `prepareChunkSpecs` +2. `uploadFile` — for each chunk: generate sender/recipient key pairs, `createXFTPChunk`, `uploadXFTPChunk` +3. `createRcvFileDescriptions` — assemble `FileDescription` per recipient from sent chunks +4. `createSndFileDescription` — assemble sender description with deletion keys + +**Download functions:** + +| TypeScript function | Haskell function | Line | Description | +|---|---|---|---| +| `downloadFile(description)` | `cliReceiveFile` | 388 | Full download: parse → download → verify → decrypt | +| `downloadFileChunk(client, chunk)` | `downloadFileChunk` | 418 | FGET + transit-decrypt one chunk | +| `ackFileChunk(client, chunk)` | `acknowledgeFileChunk` | 440 | FACK one chunk | +| `deleteFile(description)` | `cliDeleteFile` | 455 | FDEL for all chunks | + +**Download call sequence** (`cliReceiveFile`, line 388): +1. Parse and validate `FileDescription` from YAML +2. Group chunks by server +3. Parallel download: `downloadXFTPChunk` per chunk (up to 16 concurrent) +4. Verify file digest (SHA-512) over concatenated encrypted chunks +5. `decryptChunks` — file-level decrypt with auth tag verification +6. Parallel acknowledge: `ackXFTPChunk` per chunk + +**URL encoding (§4.1):** + +| TypeScript function | Description | +|---|---| +| `encodeDescriptionURI(fd)` | DEFLATE compress YAML → base64url → URL hash fragment | +| `decodeDescriptionURI(url)` | Parse hash fragment → base64url decode → inflate → YAML parse | + +### 12.17 Transit Encryption Detail ← `Simplex/FileTransfer/Client.hs:253-275` + +`downloadXFTPChunk` performs transit decryption after FGET: + +1. Generate ephemeral X25519 keypair +2. Send `FGET(rcvDhPubKey)` → receive `FRFile(sndDhPubKey, cbNonce)` + encrypted body +3. Compute DH shared secret: `dh'(sndDhPubKey, rcvDhPrivKey)` (`Crypto.hs:1280`) +4. Transit-decrypt body via `receiveSbFile` (`Transport.hs:176`): `cbInit(dhSecret, cbNonce)` → `sbDecryptChunk` loop (`fileBlockSize` = 16384-byte blocks, `Transport/HTTP2/File.hs:14`) → `sbAuth` tag verification at end +5. Verify chunk digest (SHA-256): `getChunkDigest` (`Client.hs:346`) + +### 12.18 Per-Function Testing: Haskell Drives Node + +**Mechanism:** Haskell test file (`tests/XFTPWebTests.hs`) imports the real Haskell library functions, calls each one, then calls the corresponding TypeScript function via `node`, and asserts byte-identical output. See §10.1 for the `callTS` helper and example test cases. + +**Each row in the tables in §12.1–12.17 is one test case.** The function mapping tables serve as the exhaustive test case list. For example, §12.1 has 19 encoding functions → 19 Haskell test cases. §12.4 has 10 crypto functions → 10 test cases. Total: ~100 per-function test cases across all modules. + +**TS function contract:** Each TypeScript function exported from a module must accept a `Buffer` of serialized input arguments and return a `Buffer` of serialized output. The serialization format is simple concatenation of the same binary encoding used by the protocol (using the encoding primitives from §12.1). This means the TS functions can be called both from production code (with native types) and from the Haskell test harness (with raw buffers). A thin wrapper per module handles deserialization. + +**Stateful functions (streaming crypto):** `XSalsa.State` and `Poly1305.State` are opaque types in the crypton library — they cannot be serialized to bytes. 
Therefore `sbEncryptChunk` / `sbDecryptChunk` cannot be tested individually across the Haskell↔TS boundary. Instead, test the composite operations: +- `sbEncryptTailTag(key, nonce, data, len, padLen)` — Haskell encrypts, TS encrypts same input, compare ciphertext + tag. +- `sbDecryptTailTag(key, nonce, paddedLen, ciphertext)` — Haskell decrypts, TS decrypts, compare plaintext. +- Round-trip: Haskell encrypts → TS decrypts (and vice versa) → compare content. +- Multi-chunk: Haskell runs `sbInit` + N × `sbEncryptChunk` + `sbAuth` as one sequence, TS does the same, compare final ciphertext and tag. The `callTS` script runs the full sequence in one node invocation. + +**Development workflow:** +1. Implement `encodeWord16` in `src/protocol/encoding.ts` +2. Run `cabal test --ghc-options -O0 --test-option=--match="/XFTP Web Client/encoding/encodeWord16"` +3. If it fails: Haskell says `expected 002a, got 2a00` → immediately know it's an endianness bug +4. Fix → rerun → passes → move to `encodeWord32` +5. Repeat until all per-function tests pass +6. Then attempt integration tests (§10.2) — by this point, every building block is verified + +**Integration tests** (separate, TS-driven via Node.js spawning `xftp-server`): +1. Node.js test spawns `xftp-server` binary as subprocess. +2. TypeScript client connects, uploads file, gets description. +3. Haskell `xftp` CLI (spawned as subprocess) downloads and verifies content. +4. Reverse: Haskell CLI uploads, TypeScript downloads and verifies. +5. Round-trip: TypeScript uploads → TypeScript downloads → verify. + +### 12.19 Project Structure Summary + +**TypeScript project (`xftp-web/`):** +``` +xftp-web/ # Separate npm project +├── src/ +│ ├── protocol/ +│ │ ├── encoding.ts # ← Simplex.Messaging.Encoding ✓ +│ │ ├── commands.ts # ← Simplex.FileTransfer.Protocol (commands+responses) ✓ +│ │ ├── transmission.ts # ← Simplex.FileTransfer.Protocol (framing) ✓ +│ │ ├── handshake.ts # ← Simplex.FileTransfer.Transport (handshake) ✓ +│ │ ├── address.ts # ← Simplex.Messaging.Protocol (server address) ✓ +│ │ ├── chunks.ts # ← Simplex.FileTransfer.Chunks + Client.hs (sizing) ✓ +│ │ ├── client.ts # ← Transport crypto (cbAuth, transit encrypt/decrypt) ✓ +│ │ └── description.ts # ← Simplex.FileTransfer.Description (types+yaml+val) ✓ +│ ├── crypto/ +│ │ ├── secretbox.ts # ← Simplex.Messaging.Crypto + Crypto.Lazy ✓ +│ │ ├── file.ts # ← Simplex.FileTransfer.Crypto ✓ +│ │ ├── keys.ts # ← Simplex.Messaging.Crypto (Ed25519/X25519/Ed448) ✓ +│ │ ├── digest.ts # ← Simplex.Messaging.Crypto (sha256, sha512) ✓ +│ │ ├── padding.ts # ← Simplex.Messaging.Crypto (pad/unPad) ✓ +│ │ └── identity.ts # ← Web handshake identity proof (Ed25519/Ed448) ✓ +│ ├── download.ts # Download helpers (DH, transit-decrypt, file-decrypt) ✓ +│ ├── client.ts # ← Simplex.FileTransfer.Client (HTTP/2 operations) +│ └── agent.ts # ← Simplex.FileTransfer.Client.Main (orchestration) +├── web/ # Browser UI (Phase 5) +│ ├── index.html +│ ├── upload.ts +│ ├── download.ts +│ └── progress.ts # Circular progress component +├── package.json +└── tsconfig.json +``` + +**Haskell tests (in simplexmq repo):** +``` +tests/ +├── XFTPWebTests.hs # Haskell-driven: calls each TS function via node, +│ # compares output with Haskell function (see §10.1) +│ # 164 test cases across 16 test groups +└── fixtures/ed25519/ # Ed25519 test certs for web handshake integration tests +``` + +No fixture files, no TS test harness for unit tests. 
The Haskell test file IS the test — it calls both Haskell and TypeScript functions directly and compares outputs. TS-side integration tests (`test/integration.test.ts`) are separate and only run after all per-function tests pass. diff --git a/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md b/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md new file mode 100644 index 0000000000..a1a2f47d54 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-01-31-xftp-web-server-changes.md @@ -0,0 +1,154 @@ +# XFTP Server: SNI, CORS, and Web Support + +Implementation details for Phase 3 of `rfcs/2026-01-30-send-file-page.md` (sections 6.1-6.4). + +## 1. Overview + +The XFTP server is extended to support web browser clients by: + +1. **SNI-based TLS certificate switching** — Present a CA-issued web certificate (e.g., Let's Encrypt) to browsers, while continuing to present the self-signed XFTP identity certificate to native clients. +2. **CORS headers** — Add CORS response headers on SNI connections so browsers allow cross-origin XFTP requests. +3. **Configuration** — `[WEB]` INI section for HTTPS cert/key paths; opt-in (commented out by default). + +Web handshake (challenge-response identity proof, §6.3 of parent RFC) is not yet implemented and will be added separately. + +## 2. SNI Certificate Switching + +### 2.1 Reusing the SMP Pattern + +The SMP server already implements SNI-based certificate switching via `TLSServerCredential` and `runTransportServerState_` (see `rfcs/2024-09-15-shared-port.md`). The XFTP server applies the same pattern with one key difference: both native and web XFTP clients use HTTP/2 transport, whereas SMP switches between raw SMP protocol and HTTP entirely. + +### 2.2 Approach + +When `httpServerCreds` is configured, the XFTP server bypasses `runHTTP2Server` and uses `runTransportServerState_` directly to obtain the per-connection `sniUsed` flag. It then sets up HTTP/2 manually on each TLS connection using `withHTTP2` (same internals as `runHTTP2ServerWith_`). The `sniUsed` flag is captured in the closure and shared by all HTTP/2 requests on that connection. + +When `httpServerCreds` is absent, the existing `runHTTP2Server` path is unchanged. + +``` +Native client (no SNI) ──TLS──> XFTP identity cert ──HTTP/2──> processRequest (no CORS) +Browser client (SNI) ──TLS──> Web CA cert ──HTTP/2──> processRequest (+ CORS) +``` + +### 2.3 Certificate Chain + +The web certificate file (e.g., `web.crt`) must contain the full chain: leaf certificate followed by the signing CA certificate. `loadServerCredential` uses `T.credentialLoadX509Chain` which reads all PEM blocks from the file. + +The client validates the chain by comparing `idCert` fingerprint (the CA cert, second in the 2-cert chain) against the known `keyHash`. This is the same validation as for XFTP identity certificates — the CA that signed the web cert must match the XFTP server's identity. + +## 3. CORS Support + +### 3.1 Design + +CORS headers are only added when both conditions are true: +- `addCORSHeaders` is `True` in `TransportServerConfig` (set in XFTP `Main.hs`) +- `sniUsed` is `True` for the current TLS connection + +This ensures native clients never see CORS headers. + +### 3.2 Response Headers + +All POST responses on SNI connections include: +``` +Access-Control-Allow-Origin: * +Access-Control-Expose-Headers: * +``` + +### 3.3 OPTIONS Preflight + +OPTIONS requests are intercepted at the HTTP/2 dispatch level, before `processRequest`. 
This is necessary because `processRequest` rejects bodies that don't match `xftpBlockSize`. + +Preflight response: +``` +HTTP/2 200 +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: POST, OPTIONS +Access-Control-Allow-Headers: * +Access-Control-Max-Age: 86400 +``` + +### 3.4 Security + +`Access-Control-Allow-Origin: *` is safe because: +- All XFTP commands require Ed25519 authentication (per-chunk keys from file description). +- No cookies or browser credentials are involved. +- File content is end-to-end encrypted. + +## 4. Configuration + +### 4.1 INI Template + +```ini +[WEB] +# cert: /etc/opt/simplex-xftp/web.crt +# key: /etc/opt/simplex-xftp/web.key +``` + +Commented out by default — web support is opt-in. + +### 4.2 Behavior + +- `[WEB]` section not configured: silently ignored, server operates normally for native clients only. +- `[WEB]` section configured with valid cert/key paths: SNI + CORS enabled. +- `[WEB]` section configured with missing cert files: warning + continue (non-fatal, unlike SMP where it is fatal). + +## 5. Files Modified + +### 5.1 `src/Simplex/Messaging/Transport/Server.hs` + +Added `addCORSHeaders :: Bool` field to `TransportServerConfig`. Updated `mkTransportServerConfig` to accept the new parameter. All existing SMP call sites pass `False`. + +### 5.2 `src/Simplex/Messaging/Transport/HTTP2/Server.hs` + +- Extracted `expireInactiveClient` from `runHTTP2ServerWith_`'s `where` clause to a module-level function. +- Parameterized `runHTTP2ServerWith_`: setup type changed from `((TLS p -> IO ()) -> a)` to `(((Bool, TLS p) -> IO ()) -> a)`, callback from `HTTP2ServerFunc` to `Bool -> HTTP2ServerFunc`. The `Bool` is the per-connection `sniUsed` flag, threaded through `H.run` to the callback. +- Extended `runHTTP2Server` with `Maybe T.Credential` parameter for SNI web certificate. Its setup uses `runTransportServerState_` with `TLSServerCredential`, which naturally provides `(sniUsed, tls)` pairs matching the new `runHTTP2ServerWith_` setup type. +- Adapted `runHTTP2ServerWith` (client-side HTTP/2, no SNI): wraps its setup to inject `(False, tls)` and its callback with `const`. +- Updated `getHTTP2Server` (test helper) to pass `Nothing` for httpCreds. + +### 5.3 `src/Simplex/FileTransfer/Server/Env.hs` + +- Added `httpCredentials :: Maybe ServerCredentials` to `XFTPServerConfig`. +- Added `httpServerCreds :: Maybe T.Credential` to `XFTPEnv`. +- `newXFTPServerEnv` loads HTTP credentials when configured. + +### 5.4 `src/Simplex/FileTransfer/Server/Main.hs` + +- Added `[WEB]` section to INI template. +- Added `httpCredentials` parsing from INI `[WEB]` section (`cert` and `key` fields). +- Set `addCORSHeaders = isJust httpCredentials_` in transport config (conditional on web cert presence). + +### 5.5 `src/Simplex/FileTransfer/Server.hs` + +Core server changes: + +- `runServer` calls `runHTTP2Server` with `httpCreds_` and a `\sniUsed -> handleRequest (sniUsed && addCORSHeaders transportConfig)` callback. TLS params are `defaultSupportedParamsHTTPS` when web creds present, `defaultSupportedParams` otherwise. SNI routing, HTTP/2 setup, and client expiration are handled inside `runHTTP2Server`. + +- `XFTPTransportRequest` carries `addCORS :: Bool` field, threaded through to `sendXFTPResponse`. + +- `sendXFTPResponse` conditionally includes CORS headers based on `addCORS`. + +- OPTIONS requests on SNI connections return CORS preflight headers before reaching `processRequest`. 
+ +- Helper functions: `corsHeaders` (response headers), `corsPreflightHeaders` (preflight headers). + +### 5.6 `tests/XFTPClient.hs` + +- Added `httpCredentials = Nothing` to `testXFTPServerConfig`. +- Added `testXFTPServerConfigSNI` with web cert config and `addCORSHeaders = True`. +- Added `withXFTPServerSNI` helper. + +### 5.7 `tests/XFTPServerTests.hs` + +Added SNI and CORS tests as a subsection within `xftpServerTests` (6 tests): + +1. **SNI cert selection** — Connect with SNI + `h2` ALPN, verify RSA web certificate is presented. +2. **Non-SNI cert selection** — Connect without SNI + `xftp/1` ALPN, verify Ed448 XFTP certificate is presented. +3. **CORS headers** — SNI POST request includes `Access-Control-Allow-Origin: *` and `Access-Control-Expose-Headers: *`. +4. **OPTIONS preflight** — SNI OPTIONS request returns all CORS preflight headers. +5. **No CORS without SNI** — Non-SNI POST request has no CORS headers. +6. **File chunk delivery** — Full XFTP file chunk upload/download through SNI-enabled server verifying no regression. + +## 6. Remaining Work + +- **Web handshake** (§6.3 of parent RFC): Challenge-response identity proof for SNI connections. The server detects web clients via the `sniUsed` flag and expects a 32-byte challenge in the first POST body (non-empty, unlike standard handshake). Response includes full cert chain + signature over `(challenge ++ sessionId)`. +- **Static page serving** (§6.5 of parent RFC): Optional serving of the web page HTML/JS bundle on GET requests. diff --git a/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md b/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md new file mode 100644 index 0000000000..de23bbf8b4 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-02-xftp-web-handshake.md @@ -0,0 +1,246 @@ +# Web Handshake — Challenge-Response Identity Proof + +RFC §6.3: Server proves XFTP identity to web clients independently of TLS CA infrastructure. + +## 1. Protocol + +**Standard handshake** (unchanged): +``` +Client → empty POST → Server +Server → padded {vRange, sessionId, authPubKey, Nothing} → Client +Client → padded {version, keyHash, Nothing} → Server +Server → empty → Client +``` + +**Web handshake** (SNI connection, non-empty hello): +``` +Client → padded {32 random bytes} → Server +Server → padded {vRange, sessionId, authPubKey, Just sigBytes} → Client + sigBytes = signatureBytes(sign(identityLeafKey, challenge <> sessionId)) +Client validates: + 1. chainIdCaCerts(authPubKey.certChain) → CCValid {leafCert, idCert} + 2. SHA-256(idCert) == keyHash (server identity) + 3. verify(leafCert.pubKey, sigBytes, challenge <> sessionId) (challenge-response) + 4. verify(leafCert.pubKey, signedPubKey.signature, signedPubKey.objectDer) (DH key auth) +Client → padded {version, keyHash, Just challenge} → Server +Server verifies: echoed challenge == stored challenge from step 1 +Server → empty → Client +``` + +**Detection**: `sniUsed` per-connection flag. Non-empty hello allowed only when `sniUsed`. Empty hello with SNI → standard handshake. + +**Why both steps 3 and 4**: Native clients verify `signedPubKey` using the TLS peer certificate (`serverKey` from `getServerVerifyKey`), which is the XFTP identity cert in non-SNI connections — TLS provides this binding. Web clients cannot access TLS peer certificate data (browser API limitation; TLS presents the web CA cert but provides no API to extract it). 
So web clients must verify at the application layer using `authPubKey.certChain`, which always contains the XFTP identity chain regardless of which cert TLS used. Step 3 proves the server holds its identity key *right now* (freshness via random challenge). Step 4 proves the DH session key was signed by the identity key holder (prevents MITM key substitution). Together they give web clients some assurance native clients get from TLS, except channel binding for commands. + +## 2. Type Changes — `src/Simplex/FileTransfer/Transport.hs` + +### `XFTPServerHandshake` (line 114) + +Add field: `webIdentityProof :: Maybe ByteString` — raw Ed448 signature bytes (114 bytes), or `Nothing` for standard handshake. No record needed — the cert chain is already in `authPubKey.certChain`. + +### `Encoding XFTPServerHandshake` (line 136) + +- `smpEncode`: append `smpEncode webIdentityProof` +- `smpP`: `Tail compat`, if non-empty `eitherToMaybe $ smpDecode compat` + +Backward compat: old clients ignore via `Tail _compat`; new client + old server → empty compat → `Nothing`. + +### `XFTPClientHandshake` (line 121) + +Add field: `webChallenge :: Maybe ByteString` + +### `Encoding XFTPClientHandshake` (line 128) + +Same `Tail compat` pattern as server handshake. + +### Export list + +Both types use `(..)` export — new fields auto-exported. + +## 3. Server Changes — `src/Simplex/FileTransfer/Server.hs` + +### `XFTPTransportRequest` (line 88) + +Add field: `sniUsed :: SNICredentialUsed` (`Bool` from `Transport.Server`). Add import. + +### `Handshake` (line 117) + +`HandshakeSent C.PrivateKeyX25519` → `HandshakeSent C.PrivateKeyX25519 (Maybe ByteString)` — stores 32-byte web challenge or `Nothing`. + +### `runServer` handler (line 145–161) + +- Pass `sniUsed` into request construction (line 154) +- SNI-first routing: when `sniUsed`, always route to `xftpServerHandshakeV1` (web ALPN `h2` would otherwise fall to `_` catch-all) + +### `xftpServerHandshakeV1` (line 162) + +- Destructure `sniUsed` from request +- Match `HandshakeSent pk challenge_` → `processClientHandshake pk challenge_` + +### `processHello` (line 171) + +- Branch `(sniUsed, B.null bodyHead)`: + - `(_, True)` → standard: `challenge_ = Nothing` + - `(True, False)` → web: unpad, verify 32 bytes, `challenge_ = Just` + - `(False, False)` → `throwE HANDSHAKE` +- Store: `HandshakeSent pk challenge_` +- Compute: `webIdentityProof = C.signatureBytes . C.sign serverSignKey . (<> sessionId) <$> challenge_` +- Construct `XFTPServerHandshake` with `webIdentityProof` + +### `processClientHandshake` (line 183) + +- Accept `challenge_` parameter +- Decode `webChallenge` from `XFTPClientHandshake` +- Add: `unless (challenge_ == webChallenge) $ throwE HANDSHAKE` + (standard: both `Nothing` → passes) + +## 4. Native Client — `src/Simplex/FileTransfer/Client.hs` + +### `xftpClientHandshakeV1` (line 142) + +Add `webChallenge = Nothing` in `sendClientHandshake` call. + +No other changes — parser handles new fields via `Tail`, native client ignores `webIdentityProof`. + +## 5. TypeScript Changes (DONE except Ed448) + +Sections 5.1 and 5.2 are implemented. Section 5.3 needs Ed448 support. + +## 10. Ed448 Support via `@noble/curves` + +**Problem**: Production servers use Ed448 certificates (default). `identity.ts` only supports Ed25519 via libsodium. libsodium has no Ed448 support and never will. + +**Solution**: Add `@noble/curves` dependency for Ed448 verification only. All other crypto stays with libsodium. 
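
As a sketch, the verification entry point with `@noble/curves` v1.x looks like this (the `ed448.verify` call and its argument order are the API named in §10.2; key and signature sizes are from the table there):

```typescript
import {ed448} from '@noble/curves/ed448'

// publicKey: 57-byte raw Ed448 key (decoded from the 69-byte SPKI DER, §10.2)
// sig: 114-byte raw signature; msg: challenge <> sessionId bytes
// Note the @noble/curves argument order: (signature, message, publicKey)
export function verifyEd448(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean {
  return ed448.verify(sig, msg, publicKey)
}
```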
+ +### 10.1 `xftp-web/package.json` — Add dependency + +```json +"dependencies": { + "libsodium-wrappers-sumo": "^0.7.13", + "@noble/curves": "^1.9.7" +} +``` + +Use v1.x (supports both CJS and ESM). v2.x is ESM-only with `.js` extension requirement. + +### 10.2 `xftp-web/src/crypto/keys.ts` — Ed448 DER constants and decode + +Add Ed448 SPKI DER prefix (12 bytes, same prefix length as Ed25519): +``` +30 43 30 05 06 03 2b 65 71 03 3a 00 +``` + +| Property | Ed25519 | Ed448 | +|----------|---------|-------| +| OID | `2b 65 70` | `2b 65 71` | +| SPKI prefix | `30 2a ...` | `30 43 ...` | +| Raw key size | 32 bytes | 57 bytes | +| SPKI total | 44 bytes | 69 bytes | +| Signature size | 64 bytes | 114 bytes | + +New functions: +- `decodePubKeyEd448(der: Uint8Array): Uint8Array` — 69 bytes → 57 bytes raw +- `encodePubKeyEd448(raw: Uint8Array): Uint8Array` — 57 bytes → 69 bytes DER +- `verifyEd448(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean` — uses `ed448.verify(sig, msg, publicKey)` from `@noble/curves/ed448` + +Note: `@noble/curves` parameter order is `(signature, message, publicKey)`, not `(publicKey, signature, message)`. + +### 10.3 `xftp-web/src/crypto/identity.ts` — Algorithm-agnostic verification + +Replace `extractCertEd25519Key` + hardcoded Ed25519 `verify` with algorithm detection: + +1. `extractCertPublicKeyInfo(certDer)` → SPKI DER (already exists, works for any algorithm) +2. Detect algorithm from SPKI: byte at offset 8 is `0x70` (Ed25519) or `0x71` (Ed448) +3. Extract raw key with appropriate decoder +4. Verify signatures with appropriate function + +```typescript +type CertKeyAlgorithm = 'ed25519' | 'ed448' + +function detectKeyAlgorithm(spki: Uint8Array): CertKeyAlgorithm { + if (spki.length === 44 && spki[8] === 0x70) return 'ed25519' + if (spki.length === 69 && spki[8] === 0x71) return 'ed448' + throw new Error("unsupported certificate key algorithm") +} +``` + +`verifyIdentityProof` changes: +- Extract SPKI from leaf cert +- Detect algorithm → choose `decodePubKeyEd25519`/`decodePubKeyEd448` and `verify`/`verifyEd448` +- Both challenge signature and DH key signature use the same leaf key + algorithm + +Remove `extractCertEd25519Key` (replaced by generic path). Keep `extractCertPublicKeyInfo` (already generic). + +### 10.4 `xftp-web/src/protocol/handshake.ts` — Comment update + +`SignedKey.signature` comment: "raw Ed25519 signature bytes (64 bytes)" → "raw signature bytes (Ed25519: 64, Ed448: 114)" + +### 10.5 Tests — `tests/XFTPWebTests.hs` + +**Integration test**: Switch from `withXFTPServerEd25519SNI` (Ed25519 fixtures) to `withXFTPServerSNI` (default Ed448 fixtures). Update fingerprint source from `tests/fixtures/ed25519/ca.crt` to `tests/fixtures/ca.crt`. + +Optionally add a second integration test with Ed25519 to cover both paths, or rely on existing unit tests for Ed25519 coverage. + +### 10.6 Implementation order + +1. `npm install @noble/curves` in `xftp-web/` +2. `keys.ts` — Ed448 constants, decode, encode, verifyEd448 +3. `identity.ts` — algorithm detection, generic verification +4. `handshake.ts` — comment fix +5. `XFTPWebTests.hs` — switch integration test to Ed448 +6. Build TS + run all tests + +## 6. Haskell Integration Test — `tests/XFTPServerTests.hs` + +Add `testWebHandshake` to "XFTP SNI and CORS" describe block. + +1. `withXFTPServerSNI` — server with web credentials +2. Connect with SNI + `h2` ALPN +3. Send padded 32-byte challenge +4. Decode `XFTPServerHandshake`, assert `webIdentityProof` is `Just` +5. 
`chainIdCaCerts` on `authPubKey.certChain` → `CCValid {leafCert, idCert}` +6. Verify `SHA-256(idCert) == keyHash` +7. Extract `leafCert` public key, verify challenge signature +8. Verify `signedPubKey` signature using `leafCert` key (DH key auth) +9. Send `XFTPClientHandshake` with `webChallenge = Just challenge` +10. Assert empty response + +Imports: `XFTPServerHandshake (..)`, `XFTPClientHandshake (..)`, `ChainCertificates (..)`, `chainIdCaCerts`. + +## 7. TS Tests — `tests/XFTPWebTests.hs` + +### Unit tests + +- **`decodeServerHandshake` with proof**: Haskell-encode with `Just sigBytes`, TS-decode, verify bytes match. +- **`encodeClientHandshake` with challenge**: TS-encode, compare with Haskell-encoded. +- **`chainIdCaCerts`**: 2/3/4-cert chains return correct positions. +- **`caFingerprint` (fixed)**: matches `sha256(idCert)` for 2 and 3-cert chains. + +### Integration test + +Node.js inline script against `withXFTPServerSNI`: +1. Connect with SNI via `http2.connect` +2. Send padded challenge, decode `XFTPServerHandshake` with TS +3. `verifyIdentityProof` — full chain validation + challenge sig + DH key sig +4. Send client handshake with echoed challenge +5. Assert empty response + +## 8. Implementation Order + +1. `Transport.hs` — `Maybe` fields + encoding instances +2. `Server.hs` — `sniUsed`, challenge in `Handshake`, `processHello`, `processClientHandshake`, SNI routing +3. `Client.hs` — `webChallenge = Nothing` +4. Build: `cabal build --ghc-options -O0` +5. Run existing SNI/CORS tests +6. `XFTPServerTests.hs` — `testWebHandshake` +7. `handshake.ts` — types, decoding, `chainIdCaCerts`, fix `caFingerprint` +8. `crypto/identity.ts` — Node.js verification functions +9. `XFTPWebTests.hs` — unit + integration tests +10. Build TS + run all tests + +## 9. Verification + +```bash +cd xftp-web && npm install && npm run build && cd .. +cabal test --ghc-options=-O0 --test-option='--match=/XFTP/XFTP server/XFTP SNI and CORS/' --test-show-details=streaming +cabal test --ghc-options=-O0 --test-option='--match=/XFTP Web Client/' --test-show-details=streaming +``` diff --git a/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md b/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md new file mode 100644 index 0000000000..2e08a2efb7 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-03-xftp-web-browser-tests.md @@ -0,0 +1,208 @@ +# Plan: Browser ↔ Haskell File Transfer Tests + +## Table of Contents +1. Goal +2. Current State +3. Implementation +4. Success Criteria +5. Files +6. Order + +## 1. Goal +Run browser upload/download tests in headless Chromium via Vitest, proving fetch-based transport works in real browser environment. + +## 2. Current State +- `client.ts`: Transport abstraction done — http2 for Node, fetch for browser ✓ +- `agent.ts`: Uses `node:crypto` (randomBytes) and `node:zlib` (deflateRawSync/inflateRawSync) — **won't run in browser** +- `XFTPWebTests.hs`: Cross-language tests exist (Haskell calls TS via Node.js) ✓ + +## 3. 
Implementation + +### 3.1 Make agent.ts isomorphic + +| Current (Node.js only) | Isomorphic replacement | +|------------------------|------------------------| +| `import crypto from "node:crypto"` | Remove import | +| `import zlib from "node:zlib"` | `import pako from "pako"` | +| `crypto.randomBytes(32)` | `crypto.getRandomValues(new Uint8Array(32))` | +| `zlib.deflateRawSync(buf)` | `pako.deflateRaw(buf)` | +| `zlib.inflateRawSync(buf)` | `pako.inflateRaw(buf)` | + +Note: `crypto.getRandomValues` available in both browser and Node.js (globalThis.crypto). + +### 3.2 Vitest browser mode setup + +`package.json` additions: +```json +"devDependencies": { + "vitest": "^3.0.0", + "@vitest/browser": "^3.0.0", + "playwright": "^1.50.0", + "@types/pako": "^2.0.3" +}, +"dependencies": { + "pako": "^2.1.0" +} +``` + +`vitest.config.ts`: +```typescript +import {defineConfig} from 'vitest/config' +import {readFileSync} from 'fs' +import {createHash} from 'crypto' + +// Compute fingerprint from ca.crt (same as Haskell's loadFileFingerprint) +const caCert = readFileSync('../tests/fixtures/ca.crt') +const fingerprint = createHash('sha256').update(caCert).digest('base64url') +const serverAddr = `xftp://${fingerprint}@localhost:7000` + +export default defineConfig({ + define: { + 'import.meta.env.XFTP_SERVER': JSON.stringify(serverAddr) + }, + test: { + browser: { + enabled: true, + provider: 'playwright', + instances: [{browser: 'chromium'}], + headless: true, + providerOptions: { + launch: {ignoreHTTPSErrors: true} + } + }, + globalSetup: './test/globalSetup.ts' + } +}) +``` + +### 3.3 Server startup + +`test/globalSetup.ts`: +```typescript +import {spawn, ChildProcess} from 'child_process' +import {resolve, join} from 'path' +import {mkdtempSync, writeFileSync, copyFileSync} from 'fs' +import {tmpdir} from 'os' + +let server: ChildProcess | null = null + +export async function setup() { + const fixtures = resolve(__dirname, '../../tests/fixtures') + + // Create temp directories + const cfgDir = mkdtempSync(join(tmpdir(), 'xftp-cfg-')) + const logDir = mkdtempSync(join(tmpdir(), 'xftp-log-')) + const filesDir = mkdtempSync(join(tmpdir(), 'xftp-files-')) + + // Copy certificates to cfgDir (xftp-server expects ca.crt, server.key, server.crt there) + copyFileSync(join(fixtures, 'ca.crt'), join(cfgDir, 'ca.crt')) + copyFileSync(join(fixtures, 'server.key'), join(cfgDir, 'server.key')) + copyFileSync(join(fixtures, 'server.crt'), join(cfgDir, 'server.crt')) + + // Write INI config file + const iniContent = `[STORE_LOG] +enable: off + +[TRANSPORT] +host: localhost +port: 7000 + +[FILES] +path: ${filesDir} + +[WEB] +cert: ${join(fixtures, 'web.crt')} +key: ${join(fixtures, 'web.key')} +` + writeFileSync(join(cfgDir, 'file-server.ini'), iniContent) + + // Spawn xftp-server with env vars + server = spawn('cabal', ['exec', 'xftp-server', '--', 'start'], { + env: { + ...process.env, + XFTP_SERVER_CFG_PATH: cfgDir, + XFTP_SERVER_LOG_PATH: logDir + }, + stdio: ['ignore', 'pipe', 'pipe'] + }) + + // Wait for "Listening on port 7000..." 
  await waitForServerReady(server)
}

export async function teardown() {
  server?.kill('SIGTERM')
  await new Promise(r => setTimeout(r, 500))
}

function waitForServerReady(proc: ChildProcess): Promise<void> {
  return new Promise((resolve, reject) => {
    const timeout = setTimeout(() => reject(new Error('Server start timeout')), 15000)
    proc.stdout?.on('data', (data: Buffer) => {
      if (data.toString().includes('Listening on port')) {
        clearTimeout(timeout)
        resolve()
      }
    })
    proc.stderr?.on('data', (data: Buffer) => {
      console.error('[xftp-server]', data.toString())
    })
    proc.on('error', reject)
    proc.on('exit', (code) => {
      clearTimeout(timeout)
      if (code !== 0) reject(new Error(`Server exited with code ${code}`))
    })
  })
}
```

Server env vars (from `apps/xftp-server/Main.hs` + `getEnvPath`):
- `XFTP_SERVER_CFG_PATH` — directory containing `file-server.ini` and certs (`ca.crt`, `server.key`, `server.crt`)
- `XFTP_SERVER_LOG_PATH` — directory for logs

### 3.4 Browser test

`test/browser.test.ts`:
```typescript
import {test, expect} from 'vitest'
import {encryptFileForUpload, uploadFile, downloadFile} from '../src/agent.js'
import {parseXFTPServer} from '../src/protocol/address.js'

const server = parseXFTPServer(import.meta.env.XFTP_SERVER)

test('browser upload + download round-trip', async () => {
  const data = new Uint8Array(50000)
  crypto.getRandomValues(data)
  const encrypted = encryptFileForUpload(data, 'test.bin')
  const {rcvDescription} = await uploadFile(server, encrypted)
  const {content} = await downloadFile(rcvDescription)
  expect(content).toEqual(data)
})
```

## 4. Success Criteria

1. `npm run build` — agent.ts compiles without node: imports
2. `cabal test --test-option='--match=/XFTP Web Client/'` — existing Node.js tests still pass
3. `npm run test:browser` — browser round-trip test passes in headless Chromium

## 5. Files to Create/Modify

**Modify:**
- `xftp-web/package.json` — add vitest, @vitest/browser, playwright, pako, @types/pako
- `xftp-web/src/agent.ts` — replace node:crypto, node:zlib with isomorphic alternatives

**Create:**
- `xftp-web/vitest.config.ts` — browser mode config
- `xftp-web/test/globalSetup.ts` — xftp-server lifecycle
- `xftp-web/test/browser.test.ts` — browser round-trip test

## 6. Order of Implementation

1. **Add pako dependency** — `npm install pako @types/pako`
2. **Make agent.ts isomorphic** — replace node:crypto, node:zlib
3. **Verify Node.js tests pass** — `cabal test --test-option='--match=/XFTP Web Client/'`
4. **Set up Vitest** — add devDeps, create vitest.config.ts
5. **Create globalSetup.ts** — write INI config, spawn xftp-server
6. **Write browser test** — upload + download round-trip
7. **Verify browser test passes** — `npm run test:browser` diff --git a/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md new file mode 100644 index 0000000000..41915bf64d --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-browser-transport.md @@ -0,0 +1,920 @@ +# Browser Transport & Web Worker Architecture

## TOC

1. Executive Summary
2. Transport: fetch() API
3. Architecture: Environment Abstraction
4. Web Worker Implementation
5. OPFS Implementation
6. Implementation Plan
7. Testing Strategy

## 1. Executive Summary

Adapt `client.ts` from `node:http2` to `fetch()` API for isomorphic Node.js/browser support.
Add environment abstraction layer so the same upload/download pipeline works with or without Web Workers and with or without OPFS. In browsers, crypto runs in a Web Worker to keep UI responsive; in Node.js tests, crypto runs directly. + +**Key architectural constraint:** Existing crypto functions (`encryptFile`, `decryptChunks`, etc.) remain unchanged. The abstraction layer wraps them, choosing execution context (direct vs Worker) and storage (memory vs OPFS) based on environment. + +**Scope:** +- Replace `node:http2` with `fetch()` in `client.ts` +- Add `CryptoBackend` abstraction with three implementations +- Create Web Worker that calls existing crypto functions +- Add OPFS storage for large files in browser + +**Out of scope:** Web page UI (Phase 5 in main RFC). + +## 2. Transport: fetch() API + +### 2.1 Current State + +`client.ts` uses `node:http2`: +```typescript +import http2 from "node:http2" +const session = http2.connect(url) +const stream = session.request({':method': 'POST', ':path': '/'}) +stream.write(commandBlock) +stream.end(chunkData) +``` + +### 2.2 Target State + +Isomorphic `fetch()` (Node.js 18+ and browsers): +```typescript +const response = await fetch(url, { + method: 'POST', + body: concatStreams(commandBlock, chunkData), + duplex: 'half', // Required for streaming request body +}) +const reader = response.body!.getReader() +``` + +### 2.3 Key Differences + +| Aspect | node:http2 | fetch() | +|--------|-----------|---------| +| Session management | Explicit `session.connect()` / `session.close()` | Per-request (HTTP/2 connection reuse is automatic) | +| Streaming upload | `stream.write()` chunks | `ReadableStream` body + `duplex: 'half'` | +| Streaming download | `stream.on('data')` | `response.body.getReader()` | +| Connection pooling | Manual | Automatic per origin | + +### 2.4 API Changes + +```typescript +// Before (node:http2) +export interface XFTPClient { + session: http2.ClientHttp2Session + thParams: THParams + server: XFTPServer +} + +// After (fetch) +export interface XFTPClient { + baseUrl: string // "https://host:port" + thParams: THParams + server: XFTPServer +} +``` + +`connectXFTP()` performs handshake via fetch, returns `XFTPClient` with `baseUrl`. +Subsequent commands use `fetch(client.baseUrl, ...)`. + +### 2.5 Handshake via fetch() + +**TLS session binding:** Multiple fetch() requests to the same origin reuse the HTTP/2 connection, which means they share the same TLS session. The server's `sessionId` (derived from TLS channel binding) remains consistent across the handshake round-trips and subsequent commands. 
+ +```typescript +async function connectXFTP(server: XFTPServer): Promise { + const baseUrl = `https://${server.host}:${server.port}` + + // Round-trip 1: challenge → server handshake + identity proof + const challenge = crypto.getRandomValues(new Uint8Array(32)) + const req1 = pad(encodeWebClientHello(challenge), xftpBlockSize) + const resp1 = await fetch(baseUrl, {method: 'POST', body: req1}) + + const reader = resp1.body!.getReader() + const serverBlock = await readExactly(reader, xftpBlockSize) + const serverHs = decodeServerHandshake(unPad(serverBlock)) + const proofBody = await readRemaining(reader) + verifyIdentityProof(server.keyHash, challenge, serverHs.sessionId, proofBody) + + // Round-trip 2: client handshake → server ack + const clientHs = encodeClientHandshake({xftpVersion: 3, keyHash: server.keyHash}) + const req2 = pad(clientHs, xftpBlockSize) + await fetch(baseUrl, {method: 'POST', body: req2}) + + return {baseUrl, thParams: {sessionId: serverHs.sessionId, ...}, server} +} +``` + +### 2.6 Command Execution + +```typescript +async function sendXFTPCommand( + client: XFTPClient, + key: Uint8Array, + entityId: Uint8Array, + cmd: Uint8Array, + chunkData?: Uint8Array +): Promise<{response: Uint8Array, body?: ReadableStream}> { + const block = xftpEncodeAuthTransmission(client.thParams, key, entityId, cmd) + + const reqBody = chunkData + ? concatBytes(block, chunkData) + : block + + const resp = await fetch(client.baseUrl, { + method: 'POST', + body: reqBody, + duplex: 'half', + }) + + const reader = resp.body!.getReader() + const responseBlock = await readExactly(reader, xftpBlockSize) + const parsed = xftpDecodeTransmission(responseBlock) + + // For FGET: remaining body is encrypted chunk + const hasMore = await peekReader(reader) + return { + response: parsed, + body: hasMore ? wrapAsStream(reader) : undefined + } +} +``` + +## 3. Architecture: Environment Abstraction + +### 3.1 Core Principle + +**Existing crypto functions remain unchanged.** The functions `encryptFile()`, `decryptChunks()`, `sha512()`, etc. in `crypto/file.ts` and `crypto/digest.ts` are pure computation — they take input bytes and produce output bytes. They have no knowledge of Workers, OPFS, or execution context. + +The abstraction layer sits between `agent.ts` (upload/download orchestration) and these crypto functions: + +``` +┌─────────────────────────────────────────────────────────────────────┐ +│ agent.ts (upload/download orchestration) │ +│ - Unchanged logic: encrypt → chunk → upload → build description │ +│ - Calls CryptoBackend interface, not crypto functions directly │ +├─────────────────────────────────────────────────────────────────────┤ +│ CryptoBackend interface (env.ts) │ +│ - Abstract interface for encrypt/decrypt/readChunk/writeChunk │ +│ - Factory function selects implementation based on environment │ +├──────────────┬──────────────────────┬───────────────────────────────┤ +│ DirectMemory │ WorkerMemory │ WorkerOPFS │ +│ Backend │ Backend │ Backend │ +│ (Node.js) │ (Browser, ≤50MB) │ (Browser, >50MB) │ +├──────────────┼──────────────────────┼───────────────────────────────┤ +│ Calls crypto │ Posts to Worker, │ Posts to Worker, │ +│ functions │ Worker calls crypto │ Worker calls crypto, │ +│ directly │ functions, returns │ streams through OPFS │ +│ │ via postMessage │ │ +├──────────────┴──────────────────────┴───────────────────────────────┤ +│ crypto/file.ts, crypto/digest.ts (unchanged) │ +│ - encryptFile(), decryptChunks(), sha512(), etc. 
│
│  - Pure functions, no environment dependencies                      │
└─────────────────────────────────────────────────────────────────────┘
```

### 3.2 CryptoBackend Interface

```typescript
// env.ts
export interface CryptoBackend {
  // Encrypt file, store result (in memory or OPFS depending on backend)
  encrypt(
    data: Uint8Array,
    fileName: string,
    onProgress?: (done: number, total: number) => void
  ): Promise<EncryptResult>

  // Decrypt from stored encrypted data
  decrypt(
    key: Uint8Array,
    nonce: Uint8Array,
    size: number,
    onProgress?: (done: number, total: number) => void
  ): Promise<DecryptResult>

  // Read chunk from stored encrypted data (for upload)
  readChunk(offset: number, size: number): Promise<Uint8Array>

  // Write chunk to storage (for download, before decrypt)
  writeChunk(data: Uint8Array, offset: number): Promise<void>

  // Clean up temporary storage
  cleanup(): Promise<void>
}

export interface EncryptResult {
  digest: Uint8Array // SHA-512 of encrypted data
  key: Uint8Array // Generated encryption key
  nonce: Uint8Array // Generated nonce
  chunkSizes: number[] // Chunk sizes for upload
  totalSize: number // Total encrypted size
}

export interface DecryptResult {
  header: FileHeader // Extracted file header (fileName, etc.)
  content: Uint8Array // Decrypted file content
}
```

### 3.3 Backend Implementations

**DirectMemoryBackend** (Node.js):
```typescript
class DirectMemoryBackend implements CryptoBackend {
  private encryptedData: Uint8Array | null = null

  async encrypt(data: Uint8Array, fileName: string, onProgress?: (done: number, total: number) => void): Promise<EncryptResult> {
    const key = randomBytes(32)
    const nonce = randomBytes(24)
    // Call existing crypto function directly
    this.encryptedData = encryptFile(data, fileName, key, nonce, onProgress)
    const digest = sha512(this.encryptedData)
    const chunkSizes = prepareChunkSizes(this.encryptedData.length)
    return { digest, key, nonce, chunkSizes, totalSize: this.encryptedData.length }
  }

  async decrypt(key: Uint8Array, nonce: Uint8Array, size: number, onProgress?: (done: number, total: number) => void): Promise<DecryptResult> {
    // Call existing crypto function directly
    return decryptChunks([this.encryptedData!], key, nonce, size, onProgress)
  }

  async readChunk(offset: number, size: number): Promise<Uint8Array> {
    return this.encryptedData!.slice(offset, offset + size)
  }

  async writeChunk(data: Uint8Array, offset: number): Promise<void> {
    if (!this.encryptedData) this.encryptedData = new Uint8Array(offset + data.length)
    this.encryptedData.set(data, offset)
  }

  async cleanup(): Promise<void> {
    this.encryptedData = null
  }
}
```

**WorkerMemoryBackend** and **WorkerOPFSBackend** are similar but post messages to a Web Worker instead of calling crypto directly. The Worker then calls the same `encryptFile()`, `decryptChunks()` functions. See §4 for Worker implementation details.
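
For comparison, a sketch of how `WorkerMemoryBackend` can satisfy the same interface by delegating to the `CryptoWorker` wrapper from §4.5 (the method mapping and the `File` conversion are assumptions of this sketch):

```typescript
// Sketch only: crypto runs in the Worker, encrypted data stays in memory.
class WorkerMemoryBackend implements CryptoBackend {
  private worker = new CryptoWorker() // main-thread wrapper, see §4.5
  private encryptedData: Uint8Array | null = null

  async encrypt(data: Uint8Array, fileName: string, onProgress?: (done: number, total: number) => void): Promise<EncryptResult> {
    const res = await this.worker.encrypt(new File([data], fileName), onProgress)
    this.encryptedData = res.encData! // memory backend: the Worker returns the bytes
    return {digest: res.digest, key: res.key, nonce: res.nonce, chunkSizes: res.chunkSizes, totalSize: this.encryptedData.length}
  }

  async decrypt(key: Uint8Array, nonce: Uint8Array, size: number, onProgress?: (done: number, total: number) => void): Promise<DecryptResult> {
    // note: CryptoWorker.decrypt transfers the buffer to the Worker
    return this.worker.decrypt([this.encryptedData!], key, nonce, size, onProgress)
  }

  async readChunk(offset: number, size: number): Promise<Uint8Array> {
    return this.encryptedData!.slice(offset, offset + size)
  }

  async writeChunk(data: Uint8Array, offset: number): Promise<void> {
    if (!this.encryptedData) this.encryptedData = new Uint8Array(offset + data.length)
    this.encryptedData.set(data, offset)
  }

  async cleanup(): Promise<void> {
    this.encryptedData = null // no OPFS temp files in this backend
  }
}
```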
+ +### 3.4 Factory Function + +```typescript +// env.ts +export function createCryptoBackend(fileSize: number): CryptoBackend { + const hasWorker = typeof Worker !== 'undefined' + const hasOPFS = typeof navigator?.storage?.getDirectory !== 'undefined' + const isLargeFile = fileSize > 50 * 1024 * 1024 + + if (hasWorker && hasOPFS && isLargeFile) { + return new WorkerOPFSBackend() // Browser + large file + } else if (hasWorker) { + return new WorkerMemoryBackend() // Browser + small file + } else { + return new DirectMemoryBackend() // Node.js + } +} +``` + +### 3.5 Usage in agent.ts + +```typescript +// agent.ts - upload orchestration (simplified) +export async function uploadFile( + server: XFTPServer, + fileData: Uint8Array, + fileName: string, + onProgress?: ProgressCallback +): Promise { + // Create backend based on environment + const backend = createCryptoBackend(fileData.length) + + try { + // Encrypt (runs in Worker in browser, directly in Node) + const enc = await backend.encrypt(fileData, fileName, onProgress) + + // Upload chunks (same code regardless of backend) + const client = await connectXFTP(server) + const sentChunks = [] + let offset = 0 + for (const size of enc.chunkSizes) { + const chunk = await backend.readChunk(offset, size) + const sent = await uploadChunk(client, chunk, enc.digest) + sentChunks.push(sent) + offset += size + } + + // Build description and URI + const fd = buildFileDescription(enc, sentChunks) + return encodeFileDescriptionURI(fd) + } finally { + await backend.cleanup() + } +} +``` + +The key point: `uploadFile()` logic is identical regardless of whether crypto runs in a Worker or directly. The `CryptoBackend` abstraction hides that detail. + +### 3.6 Why This Matters for Testing + +- **Layer 1 tests** (per-function): Call `encryptFile()`, `decryptChunks()` directly via Node — unchanged +- **Layer 2 tests** (full flow): Call `uploadFile()`, `downloadFile()` in Node — uses `DirectMemoryBackend`, same code path as browser except for Worker +- **Layer 3 tests** (browser): Call `uploadFile()`, `downloadFile()` in Playwright — uses `WorkerMemoryBackend` or `WorkerOPFSBackend` + +All three layers exercise the same crypto functions. The only difference is execution context. + +## 4. Web Worker Implementation + +### 4.1 Why Web Worker + +File encryption (XSalsa20-Poly1305) is sequential and CPU-bound: +- 100 MB file ≈ 1-2 seconds of continuous computation +- Running on main thread blocks UI (no progress updates, frozen page) +- Chunking into async microtasks adds complexity and still causes jank + +Web Worker runs crypto in parallel thread. Main thread stays responsive. 
+ +### 4.2 Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Main Thread │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────┐ │ +│ │ UI (upload/ │ │ Progress │ │ Network (fetch) │ │ +│ │ download) │ │ display │ │ │ │ +│ └──────┬──────┘ └──────▲──────┘ └──────────▲──────────┘ │ +│ │ │ │ │ +│ │ postMessage │ progress │ encrypted │ +│ ▼ │ events │ chunks │ +├─────────────────────────────────────────────────────────────┤ +│ Web Worker │ +│ ┌─────────────────────────────────────────────────────────┐│ +│ │ Crypto Pipeline ││ +│ │ - encryptFile() with progress callbacks ││ +│ │ - decryptChunks() with progress callbacks ││ +│ │ - OPFS read/write for temp storage ││ +│ └─────────────────────────────────────────────────────────┘│ +└─────────────────────────────────────────────────────────────┘ +``` + +### 4.3 Message Protocol + +**Main → Worker:** + +```typescript +type WorkerRequest = + // Encrypt file, store result in OPFS (large) or memory (small) + | {type: 'encrypt', file: File, fileName: string, useOPFS: boolean} + // Read encrypted chunk from OPFS for upload + | {type: 'readChunk', offset: number, size: number} + // Write downloaded chunk to OPFS for later decryption + | {type: 'writeChunk', data: ArrayBuffer, offset: number} + // Decrypt from OPFS or provided chunks + | {type: 'decrypt', key: Uint8Array, nonce: Uint8Array, size: number, chunks?: ArrayBuffer[]} + // Delete OPFS temp files + | {type: 'cleanup'} + | {type: 'cancel'} +``` + +**Worker → Main:** + +```typescript +type WorkerResponse = + | {type: 'progress', phase: 'encrypt' | 'decrypt', done: number, total: number} + // For OPFS: encData is empty, data lives in OPFS temp file + | {type: 'encrypted', encData: ArrayBuffer | null, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array, chunkSizes: number[]} + | {type: 'chunk', data: ArrayBuffer} // Response to readChunk + | {type: 'chunkWritten'} // Response to writeChunk + | {type: 'decrypted', header: FileHeader, content: ArrayBuffer} + | {type: 'cleaned'} // Response to cleanup + | {type: 'error', message: string} +``` + +### 4.4 Worker Implementation + +```typescript +// crypto.worker.ts +import {encryptFile, encryptFileStreaming, decryptChunks, decryptFromOPFS} from './crypto/file.js' +import {sha512} from './crypto/digest.js' +import {prepareChunkSizes} from './protocol/chunks.js' + +let opfsHandle: FileSystemSyncAccessHandle | null = null + +self.onmessage = async (e: MessageEvent) => { + const req = e.data + + if (req.type === 'encrypt') { + const key = crypto.getRandomValues(new Uint8Array(32)) + const nonce = crypto.getRandomValues(new Uint8Array(24)) + + if (req.useOPFS) { + // Large file: stream through OPFS to avoid memory pressure + const root = await navigator.storage.getDirectory() + const fileHandle = await root.getFileHandle('encrypted-temp', {create: true}) + opfsHandle = await fileHandle.createSyncAccessHandle() + + // Stream encrypt: read 64KB from File, encrypt, write to OPFS + const digest = await encryptFileStreaming( + req.file, + req.fileName, + key, + nonce, + opfsHandle, + (done, total) => self.postMessage({type: 'progress', phase: 'encrypt', done, total}) + ) + + const encSize = opfsHandle.getSize() + const chunkSizes = prepareChunkSizes(encSize) + + self.postMessage({ + type: 'encrypted', + encData: null, // Data in OPFS, not memory + digest, key, nonce, chunkSizes + }) + } else { + // Small file: in-memory is fine + const source = new Uint8Array(await req.file.arrayBuffer()) + const encData = 
encryptFile(source, req.fileName, key, nonce, (done, total) => {
        self.postMessage({type: 'progress', phase: 'encrypt', done, total})
      })

      const digest = sha512(encData)
      const chunkSizes = prepareChunkSizes(encData.length)

      self.postMessage({
        type: 'encrypted',
        encData: encData.buffer,
        digest, key, nonce, chunkSizes
      }, [encData.buffer])
    }
  }

  if (req.type === 'readChunk') {
    // Read chunk from OPFS for upload
    const chunk = new Uint8Array(req.size)
    opfsHandle!.read(chunk, {at: req.offset})
    self.postMessage({type: 'chunk', data: chunk.buffer}, [chunk.buffer])
  }

  if (req.type === 'writeChunk') {
    // Write downloaded chunk to OPFS
    if (!opfsHandle) {
      const root = await navigator.storage.getDirectory()
      const fileHandle = await root.getFileHandle('download-temp', {create: true})
      opfsHandle = await fileHandle.createSyncAccessHandle()
    }
    opfsHandle.write(new Uint8Array(req.data), {at: req.offset})
    self.postMessage({type: 'chunkWritten'})
  }

  if (req.type === 'decrypt') {
    let result
    if (req.chunks) {
      // Small file: chunks provided in memory
      const chunks = req.chunks.map(b => new Uint8Array(b))
      result = decryptChunks(chunks, req.key, req.nonce, req.size, (done, total) => {
        self.postMessage({type: 'progress', phase: 'decrypt', done, total})
      })
    } else {
      // Large file: read from OPFS
      result = decryptFromOPFS(opfsHandle!, req.key, req.nonce, req.size, (done, total) => {
        self.postMessage({type: 'progress', phase: 'decrypt', done, total})
      })
    }

    self.postMessage({
      type: 'decrypted',
      header: result.header,
      content: result.content.buffer
    }, [result.content.buffer])
  }

  if (req.type === 'cleanup') {
    if (opfsHandle) {
      opfsHandle.close()
      opfsHandle = null
    }
    const root = await navigator.storage.getDirectory()
    try { await root.removeEntry('encrypted-temp') } catch {}
    try { await root.removeEntry('download-temp') } catch {}
    self.postMessage({type: 'cleaned'})
  }
}
```

### 4.5 Main Thread Wrapper

```typescript
// crypto-worker.ts (main thread)
export class CryptoWorker {
  private worker: Worker
  private pending: Map<string, {resolve: (value: any) => void, reject: (err: Error) => void}> = new Map()
  private onProgress?: (done: number, total: number) => void

  constructor() {
    this.worker = new Worker(new URL('./crypto.worker.js', import.meta.url), {type: 'module'})
    this.worker.onmessage = (e) => this.handleMessage(e.data)
  }

  async encrypt(file: File, onProgress?: (done: number, total: number) => void): Promise<{encData: Uint8Array | null, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array, chunkSizes: number[]}> {
    const useOPFS = file.size > 50 * 1024 * 1024 // 50 MB threshold
    return new Promise((resolve, reject) => {
      this.pending.set('encrypt', {resolve, reject})
      this.onProgress = onProgress
      this.worker.postMessage({type: 'encrypt', file, fileName: file.name, useOPFS})
    })
  }

  async decrypt(
    chunks: Uint8Array[],
    key: Uint8Array,
    nonce: Uint8Array,
    size: number,
    onProgress?: (done: number, total: number) => void
  ): Promise<{header: FileHeader, content: Uint8Array}> {
    return new Promise((resolve, reject) => {
      this.pending.set('decrypt', {resolve, reject})
      this.onProgress = onProgress
      this.worker.postMessage({
        type: 'decrypt',
        chunks: chunks.map(c => c.buffer),
        key, nonce, size
      }, chunks.map(c => c.buffer))
    })
  }

  private handleMessage(msg: WorkerResponse) {
    if (msg.type === 'progress') {
      this.onProgress?.(msg.done, msg.total)
    } else if (msg.type === 'encrypted') {
      this.pending.get('encrypt')?.resolve({
        encData: msg.encData ?
new Uint8Array(msg.encData) : null, // null when using OPFS + digest: msg.digest, + key: msg.key, + nonce: msg.nonce, + chunkSizes: msg.chunkSizes + }) + } else if (msg.type === 'decrypted') { + this.pending.get('decrypt')?.resolve({ + header: msg.header, + content: new Uint8Array(msg.content) + }) + } else if (msg.type === 'error') { + // Reject all pending + for (const p of this.pending.values()) p.reject(new Error(msg.message)) + } + } +} +``` + +## 5. OPFS Implementation + +### 5.1 Purpose + +For files approaching 100 MB, holding encrypted data in memory while uploading creates memory pressure. OPFS provides temporary file storage: +- Write encrypted data to OPFS as it's generated +- Read chunks from OPFS for upload +- Delete after upload completes + +### 5.2 When to Use + +- Files > 50 MB: Use OPFS +- Files ≤ 50 MB: In-memory (simpler, no OPFS overhead) + +Threshold is configurable. + +### 5.3 OPFS API + +```typescript +// In Web Worker (synchronous API for performance) +const root = await navigator.storage.getDirectory() +const fileHandle = await root.getFileHandle('encrypted-temp', {create: true}) +const accessHandle = await fileHandle.createSyncAccessHandle() + +// Write encrypted chunks as they're generated +accessHandle.write(encryptedChunk, {at: offset}) + +// Read chunk for upload +const chunk = new Uint8Array(chunkSize) +accessHandle.read(chunk, {at: chunkOffset}) + +// Cleanup +accessHandle.close() +await root.removeEntry('encrypted-temp') +``` + +### 5.4 Upload Flow with OPFS + +``` +1. Main: user drops file +2. Main → Worker: {type: 'encrypt', file} +3. Worker: + - Create OPFS temp file + - Encrypt 64KB at a time, write to OPFS + - Post progress every 64KB + - Compute digest + - Return {digest, key, nonce, chunkSizes} (data stays in OPFS) +4. Main: for each chunk: + - Main → Worker: {type: 'readChunk', offset, size} + - Worker: read from OPFS, return chunk + - Main: upload chunk via fetch() +5. Main → Worker: {type: 'cleanup'} +6. Worker: delete OPFS temp file +``` + +### 5.5 Download Flow with OPFS + +``` +1. Main: parse URL, get FileDescription +2. Main: for each chunk: + - Download via fetch() + - Main → Worker: {type: 'writeChunk', data, offset} + - Worker: write to OPFS temp file +3. Main → Worker: {type: 'decrypt', key, nonce, size} +4. Worker: + - Read from OPFS + - Decrypt, verify auth tag + - Return {header, content} +5. Main: trigger browser download +6. Main → Worker: {type: 'cleanup'} +``` + +## 6. Implementation Plan + +### 6.1 Phase A: fetch() Transport + +**Goal:** Replace `node:http2` with `fetch()` in `client.ts`. All existing Node.js tests pass. + +1. Rewrite `connectXFTP()` to use fetch() for handshake +2. Rewrite `sendXFTPCommand()` to use fetch() +3. Update `createXFTPChunk`, `uploadXFTPChunk`, `downloadXFTPChunk`, etc. +4. Remove `node:http2` import +5. Run existing Haskell integration tests — must pass + +**Files:** `client.ts` + +### 6.2 Phase B: Environment Abstraction + Web Worker + +**Goal:** Add `CryptoBackend` abstraction (§3) so the same code works in Node (direct) and browser (Worker). + +1. Create `env.ts` with `CryptoBackend` interface and `createCryptoBackend()` factory (as specified in §3) +2. Implement `DirectMemoryBackend` for Node.js +3. Create `crypto.worker.ts` that imports and calls existing crypto functions +4. Implement `WorkerMemoryBackend` for browser +5. Update `agent.ts` to use `createCryptoBackend()` instead of direct crypto calls +6. 
Existing tests pass (now using `DirectMemoryBackend`) + +**Files:** `env.ts`, `crypto.worker.ts`, `agent.ts` + +### 6.3 Phase C: OPFS Backend + +**Goal:** Large files (>50 MB) use OPFS for temp storage in browser. + +1. Implement `WorkerOPFSBackend` — uses OPFS sync API in worker +2. Add OPFS helpers in worker: read/write to temp file +3. Factory function now returns `WorkerOPFSBackend` for large files +4. Same `agent.ts` code works — only backend implementation differs + +**Files:** `env.ts`, `crypto.worker.ts` + +### 6.4 Phase D: Browser Testing + +**Goal:** Verify everything works in real browsers. + +1. Create minimal test HTML page +2. Test upload flow in Chrome, Firefox, Safari +3. Test download flow +4. Test progress reporting +5. Test cancellation +6. Test error handling (network failure, invalid file) + +## 7. Testing Strategy + +### 7.1 Test Layers + +The `CryptoBackend` abstraction (§3) enables testing at multiple levels without code duplication: + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ Layer 3: Browser Integration (Playwright) │ +│ - Web Worker message passing │ +│ - OPFS read/write │ +│ - Progress UI updates │ +│ - Real browser fetch() with CORS │ +├─────────────────────────────────────────────────────────────────┤ +│ Layer 2: Full Flow (Haskell-driven, Node.js) │ +│ - fetch() transport against real xftp-server │ +│ - Upload: encrypt → chunk → upload → build description │ +│ - Download: parse → download → verify → decrypt │ +│ - Cross-language: TS upload ↔ Haskell download (and vice versa) │ +├─────────────────────────────────────────────────────────────────┤ +│ Layer 1: Per-Function (Haskell-driven, Node.js) │ +│ - 172 existing tests │ +│ - Byte-identical output vs Haskell functions │ +└─────────────────────────────────────────────────────────────────┘ +``` + +### 7.2 Layer 1: Per-Function Tests (Existing) + +Existing Haskell-driven tests in `XFTPWebTests.hs`. Each test calls a TypeScript function via Node and compares output with Haskell. + +```bash +cabal test --ghc-options -O0 --test-option='--match=/XFTP Web Client/' +``` + +All 172 tests must pass. No changes needed for browser transport work. + +### 7.3 Layer 2: Full Flow Tests (Node.js + fetch) + +Haskell-driven integration tests using Node.js native fetch(). These test the complete upload/download flow without Worker/OPFS. 
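+
+On the TypeScript side this requires small test entry points that the Haskell driver invokes via `callTS`. A minimal sketch, assuming hex-encoded arguments and results; the hex helpers and the exact `uploadFile`/`downloadFile` signatures here are assumptions, not the final API:
+
+```typescript
+// src/agent.ts test entry points (sketch; all signatures assumed)
+const toHex = (b: Uint8Array): string => Array.from(b, (x) => x.toString(16).padStart(2, "0")).join("")
+const fromHex = (s: string): Uint8Array => new Uint8Array((s.match(/../g) ?? []).map((h) => parseInt(h, 16)))
+
+export async function uploadFileTest(serverAddrHex: string, fileHex: string): Promise<string> {
+  const server = new TextDecoder().decode(fromHex(serverAddrHex))
+  const uri = await uploadFile(server, fromHex(fileHex)) // assumed orchestration entry point
+  return toHex(new TextEncoder().encode(uri))
+}
+
+export async function downloadFileTest(uriHex: string): Promise<string> {
+  const uri = new TextDecoder().decode(fromHex(uriHex))
+  const content = await downloadFile(uri) // assumed orchestration entry point
+  return toHex(content)
+}
+```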
+
+```haskell
+-- XFTPWebTests.hs (extends existing test file)
+it "fetch transport: upload and download round-trip" $ do
+  withXFTPServer testXFTPServerConfigSNI $ \server -> do
+    -- TypeScript uploads via fetch(), returns URI
+    uriHex <- jsOut $ callTS "src/agent" "uploadFileTest" serverAddrHex <> testFileHex
+    -- TypeScript downloads via fetch()
+    content <- jsOut $ callTS "src/agent" "downloadFileTest" uriHex
+    content `shouldBe` testFileContent
+
+it "fetch transport: TS upload, Haskell download" $ do
+  withXFTPServer testXFTPServerConfigSNI $ \server -> do
+    uri <- jsOut $ callTS "src/agent" "uploadFileTest" serverAddrHex <> testFileHex
+    -- Haskell agent downloads using existing xftp CLI pattern
+    outPath <- withAgent 1 agentCfg initAgentServers testDB $ \a -> do
+      rfId <- xftpReceiveFile' a 1 uri Nothing
+      waitRfDone a rfId -- helper: blocks until RFDONE, returns the received file path
+    content <- B.readFile outPath
+    content `shouldBe` testFileContent
+```
+
+**What this tests:**
+- fetch() handshake (challenge-response, TLS session binding)
+- fetch() command execution (FNEW, FPUT, FGET, FACK)
+- Streaming request/response bodies
+- Full encrypt → upload → download → decrypt flow
+
+**What this doesn't test:**
+- Web Worker message passing
+- OPFS storage
+- Browser-specific fetch() behavior (CORS preflight, etc.)
+
+### 7.4 Layer 3: Browser Integration Tests (Playwright)
+
+Playwright tests run in real browsers, testing browser-specific functionality.
+
+**Test infrastructure:**
+
+```
+xftp-web/
+├── test/
+│   ├── browser.test.ts    # Playwright test file
+│   └── test-server.ts     # Spawns xftp-server for tests
+└── test-page/
+    ├── index.html         # Minimal test UI
+    └── test-harness.ts    # Exposes test functions to window
+```
+
+**Running browser tests:**
+
+```bash
+cd xftp-web
+npm run test:browser  # Spawns xftp-server, runs Playwright
+```
+
+**Test cases:**
+
+```typescript
+// test/browser.test.ts
+import { test, expect } from '@playwright/test'
+import { spawn, type ChildProcess } from 'child_process'
+
+let serverProcess: ChildProcess
+
+test.beforeAll(async () => {
+  // Spawn xftp-server with SNI cert for browser TLS
+  serverProcess = spawn('xftp-server', ['start', '-c', 'test-config.ini'])
+  await waitForServer() // polls until the spawned server accepts connections
+})
+
+test.afterAll(async () => {
+  serverProcess.kill()
+})
+
+test('small file upload/download (in-memory)', async ({ page }) => {
+  await page.goto('/test-page/')
+
+  const result = await page.evaluate(async () => {
+    const data = new Uint8Array(1024 * 1024) // 1 MB
+    // getRandomValues is capped at 65536 bytes per call; fill in slices
+    for (let i = 0; i < data.length; i += 65536) crypto.getRandomValues(data.subarray(i, i + 65536))
+    const file = new File([data], 'small.bin')
+
+    const uri = await window.xftp.uploadFile(file)
+    const downloaded = await window.xftp.downloadFile(uri)
+
+    return {
+      uploadedSize: data.length,
+      downloadedSize: downloaded.length,
+      match: arraysEqual(data, downloaded), // exposed on the page by test-harness.ts
+      usedOPFS: window.xftp.lastUploadUsedOPFS
+    }
+  })
+
+  expect(result.match).toBe(true)
+  expect(result.usedOPFS).toBe(false) // Small file, no OPFS
+})
+
+test('large file upload/download (OPFS)', async ({ page }) => {
+  await page.goto('/test-page/')
+
+  const result = await page.evaluate(async () => {
+    const data = new Uint8Array(60 * 1024 * 1024) // 60 MB
+    // getRandomValues is capped at 65536 bytes per call; fill in slices
+    for (let i = 0; i < data.length; i += 65536) crypto.getRandomValues(data.subarray(i, i + 65536))
+    const file = new File([data], 'large.bin')
+
+    const uri = await window.xftp.uploadFile(file)
+    const downloaded = await window.xftp.downloadFile(uri)
+
+    return {
+      match: arraysEqual(data, downloaded),
+      usedOPFS: window.xftp.lastUploadUsedOPFS
+    }
+  })
+
+  expect(result.match).toBe(true)
+  expect(result.usedOPFS).toBe(true) // Large file, used OPFS
+})
+
+test('progress events fire during upload', async 
({ page }) => { + await page.goto('/test-page/') + + const progressEvents = await page.evaluate(async () => { + const events: number[] = [] + const data = new Uint8Array(10 * 1024 * 1024) // 10 MB + const file = new File([data], 'progress.bin') + + await window.xftp.uploadFile(file, (done, total) => { + events.push(done / total) + }) + + return events + }) + + expect(progressEvents.length).toBeGreaterThan(1) + expect(progressEvents[progressEvents.length - 1]).toBe(1) // 100% at end +}) + +test('Web Worker keeps UI responsive', async ({ page }) => { + await page.goto('/test-page/') + + // Start upload and measure main thread responsiveness + const result = await page.evaluate(async () => { + const data = new Uint8Array(50 * 1024 * 1024) // 50 MB + const file = new File([data], 'responsive.bin') + + let frameCount = 0 + let uploadDone = false + + // Count animation frames during upload + function countFrames() { + frameCount++ + if (!uploadDone) requestAnimationFrame(countFrames) + } + requestAnimationFrame(countFrames) + + const start = performance.now() + await window.xftp.uploadFile(file) + uploadDone = true + const elapsed = performance.now() - start + + // If main thread was blocked, frameCount would be very low + const expectedFrames = (elapsed / 1000) * 30 // ~30 fps minimum + return { frameCount, expectedFrames, elapsed } + }) + + // Should maintain reasonable frame rate (Worker offloaded crypto) + expect(result.frameCount).toBeGreaterThan(result.expectedFrames * 0.5) +}) +``` + +### 7.5 Cross-Browser Matrix + +| Browser | fetch streaming | Web Worker | OPFS sync | Status | +|---------|----------------|------------|-----------|--------| +| Chrome 105+ | ✓ | ✓ | ✓ | Primary target | +| Firefox 111+ | ✓ | ✓ | ✓ | Supported | +| Safari 16.4+ | ✓ | ✓ | ✓ | Supported | +| Edge 105+ | ✓ | ✓ | ✓ | Supported (Chromium) | + +Playwright tests run against Chrome by default. CI can run against all browsers. + +### 7.6 Test Execution Summary + +| Phase | Test Layer | Command | What's Verified | +|-------|-----------|---------|-----------------| +| A | Layer 1 + 2 | `cabal test --test-option='--match=/XFTP Web Client/'` | fetch() transport, full flow | +| B | Layer 3 | `npm run test:browser` | Worker message passing, progress | +| C | Layer 3 | `npm run test:browser` | OPFS storage for large files | +| D | Layer 3 | `npm run test:browser -- --project=firefox,webkit` | Cross-browser | diff --git a/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md new file mode 100644 index 0000000000..b69234de82 --- /dev/null +++ b/rfcs/2026-01-30-send-file-page/2026-02-04-xftp-web-page.md @@ -0,0 +1,772 @@ +# Send File Web Page — Implementation Plan + +## TOC +1. Executive Summary +2. Architecture +3. CryptoBackend & Web Worker +4. Server Configuration +5. Page Structure & UI +6. Upload Flow +7. Download Flow +8. Build & Dev Setup +9. agent.ts Changes +10. Testing +11. Files +12. Implementation Order + +## 1. Executive Summary + +Build a static web page for browser-based XFTP file transfer (Phase 5 of master RFC). The page supports upload (drag-drop → encrypt → upload → shareable link) and download (open link → download → decrypt → save). Crypto runs in a Web Worker; large files use OPFS temp storage. + +Two build variants: +- **Local**: single test server at `localhost:7000` (development/testing) +- **Production**: 12 preset XFTP servers (6 SimpleX + 6 Flux) + +Uses Vite for bundling (already a dependency via vitest). 
No CSS framework — plain CSS per RFC spec.
+
+## 2. Architecture
+
+```
+xftp-web/
+├── src/                    # Library (existing, targeted changes)
+│   ├── agent.ts            # Modified: uploadFile readChunk, downloadFileRaw
+│   ├── client.ts           # Modified: downloadXFTPChunkRaw
+│   ├── crypto/             # Unchanged
+│   ├── download.ts         # Unchanged
+│   └── protocol/
+│       └── description.ts  # Fix: SHA-256 → SHA-512 comment on digest field
+├── web/                    # Web page (new)
+│   ├── index.html          # Entry point (CSP meta tag)
+│   ├── main.ts             # Router + sodium.ready init
+│   ├── upload.ts           # Upload UI + orchestration
+│   ├── download.ts         # Download UI + orchestration
+│   ├── progress.ts         # Circular progress canvas component
+│   ├── servers.ts          # Server list (build-time configured, imports servers.json)
+│   ├── servers.json        # Preset server addresses (shared with vite.config.ts)
+│   ├── crypto-backend.ts   # CryptoBackend interface + WorkerBackend
+│   ├── crypto.worker.ts    # Web Worker: encrypt/decrypt/OPFS
+│   └── style.css           # Minimal styling
+├── vite.config.ts          # Page build config (new)
+├── tsconfig.web.json       # IDE/CI type-check for web/ (new)
+├── tsconfig.worker.json    # IDE/CI type-check for worker (new)
+├── playwright.config.ts    # Page E2E test config (new)
+├── vitest.config.ts        # Test config (existing)
+├── .gitignore              # Existing (add dist-web/)
+└── test/                   # Tests (existing + new page test)
+```
+
+Data flow:
+
+```
+  ┌─────────────────────────────────────────────────┐
+  │ Main Thread                                     │
+  │                                                 │
+  │ Upload:   upload.ts   ──► agent.ts ──► fetch()  │
+  │ Download: download.ts ──► agent.ts ──► fetch()  │
+  │                │                │               │
+  │           postMessage        HTTP/2             │
+  │                ▼                ▼               │
+  │   ┌──────────────────┐     ┌──────────┐         │
+  │   │ Web Worker       │     │ XFTP     │         │
+  │   │ crypto.worker.ts │     │ Server   │         │
+  │   │ ┌─────────────┐  │     └──────────┘         │
+  │   │ │ OPFS temp   │  │                          │
+  │   │ └─────────────┘  │                          │
+  │   └──────────────────┘                          │
+  └─────────────────────────────────────────────────┘
+```
+
+Both upload and download use `agent.ts` for orchestration (connection pooling, parallel chunk transfers, redirect handling). Upload uses a `readChunk` callback for Worker data access. Download uses an `onRawChunk` callback to route raw encrypted chunks to the Worker for decryption (see §7.2). ACK is the caller's responsibility — `downloadFileRaw` returns the resolved `FileDescription` without ACKing, so the caller can verify integrity before acknowledging.
+
+## 3. CryptoBackend & Web Worker
+
+### 3.1 Interface
+
+```typescript
+// crypto-backend.ts
+export interface CryptoBackend {
+  // Upload: encrypt file, store encrypted data in OPFS
+  encrypt(data: Uint8Array, fileName: string,
+    onProgress?: (done: number, total: number) => void
+  ): Promise<EncryptResult>
+
+  // Upload: read encrypted chunk from OPFS (called by agent.ts via readChunk callback)
+  readChunk(offset: number, size: number): Promise<Uint8Array>
+
+  // Download: transit-decrypt raw chunk and store in OPFS
+  decryptAndStoreChunk(
+    dhSecret: Uint8Array, nonce: Uint8Array,
+    body: Uint8Array, digest: Uint8Array, chunkNo: number
+  ): Promise<void>
+
+  // Download: verify digest + file-level decrypt all stored chunks
+  // Only needs size/digest/key/nonce — not the full FileDescription (avoids sending private keys to Worker)
+  verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array}
+  ): Promise<{header: FileHeader, content: Uint8Array}>
+
+  cleanup(): Promise<void>
+}
+
+// Structurally identical to EncryptedFileMetadata from agent.ts (§9.1).
+// Kept separate to avoid crypto-backend.ts importing from agent.ts
+// (which would pull in node:http2 via client.ts, breaking Worker bundling).
+// TypeScript structural typing makes them assignment-compatible.
+export interface EncryptResult {
+  digest: Uint8Array
+  key: Uint8Array
+  nonce: Uint8Array
+  chunkSizes: number[]
+}
+```
+
+### 3.2 Factory
+
+```typescript
+export function createCryptoBackend(): CryptoBackend {
+  if (typeof Worker === 'undefined') {
+    throw new Error('Web Workers required — update your browser')
+  }
+  return new WorkerBackend()
+}
+```
+
+The Worker always uses OPFS for temp storage (single code path — no memory/disk branching). OPFS I/O overhead is negligible relative to crypto and network time. Each Worker session creates a unique directory in OPFS root named `session-<timestamp>-<random>`, containing `upload.bin` and `download.bin` as needed. `cleanup()` deletes the entire session directory. On Worker startup (before processing messages), sweep OPFS root and delete any `session-*` directories whose embedded timestamp (parsed from the name) is older than 1 hour — this handles stale files from crashed tabs. The OPFS API does not expose directory timestamps, so the name-encoded timestamp is the only reliable mechanism. This prevents cross-tab collisions and unbounded OPFS growth.
+
+### 3.3 Worker message protocol
+
+Every request carries a numeric `id`. Responses carry the same `id`. WorkerBackend maintains a `Map` to match responses to pending promises.
+
+Main → Worker (fields marked `†` are Transferable — arrive as `ArrayBuffer` in Worker, must be wrapped with `new Uint8Array(...)` before use):
+- `{id: number, type: 'encrypt', data†: ArrayBuffer, fileName: string}` — encrypt file, store in OPFS
+- `{id: number, type: 'readChunk', offset: number, size: number}` — read encrypted chunk from OPFS
+- `{id: number, type: 'decryptAndStoreChunk', dhSecret: Uint8Array, nonce: Uint8Array, body†: ArrayBuffer, chunkDigest: Uint8Array, chunkNo: number}` — transit-decrypt + store in OPFS. `chunkDigest` is the per-chunk SHA-256 digest (verified by `decryptReceivedChunk`). Distinct from the file-level SHA-512 digest in `verifyAndDecrypt`.
+- `{id: number, type: 'verifyAndDecrypt', size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array}` — verify digest + file-level decrypt all chunks. Only the four fields needed for verification/decryption are sent — not the full `FileDescription`, which contains private replica keys that the Worker doesn't need.
+- `{id: number, type: 'cleanup'}` — delete OPFS temp files
+
+Worker → Main (fields marked `†` are Transferable):
+- `{id: number, type: 'progress', done: number, total: number}` — encryption/decryption progress (fire-and-forget, no promise)
+- `{id: number, type: 'encrypted', digest: Uint8Array, key: Uint8Array, nonce: Uint8Array, chunkSizes: number[]}` — all fields structured-cloned (not transferred)
+- `{id: number, type: 'chunk', data†: ArrayBuffer}` — readChunk response
+- `{id: number, type: 'stored'}` — decryptAndStore acknowledgment
+- `{id: number, type: 'decrypted', header: FileHeader, content†: ArrayBuffer}` — verifyAndDecrypt response
+- `{id: number, type: 'cleaned'}`
+- `{id: number, type: 'error', message: string}` — rejects the pending promise for this `id`
+
+All messages carrying large `ArrayBuffer` payloads use `postMessage(msg, [transferables])` to transfer ownership instead of structured-clone copying. Only `ArrayBuffer` can be transferred — `Uint8Array`, `number[]`, and other types are always structured-cloned.
This applies to: `encrypt` request (`data`), `readChunk` response (`data`), `decryptAndStoreChunk` request (`body`), and `verifyAndDecrypt` response (`content`). The `WorkerBackend` implementation must ensure the transferred `ArrayBuffer` covers the full `Uint8Array` — if `byteOffset !== 0` or `byteLength !== buffer.byteLength`, slice first: `data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength)`. This is required for `decryptAndStore` request bodies: `sendXFTPCommand` returns `body = fullResp.subarray(XFTP_BLOCK_SIZE)`, which has `byteOffset = XFTP_BLOCK_SIZE`. Other payloads are full-buffer views (§6 step 3 creates `new Uint8Array(await file.arrayBuffer())`; Worker responses allocate fresh buffers) but `WorkerBackend` should guard unconditionally. + +### 3.4 Worker internals + +**Imports:** The Worker imports directly from `libsodium-wrappers-sumo` (for `await sodium.ready`), `src/crypto/file.js` (`encryptFile`, `encodeFileHeader`, `decryptChunks`), `src/crypto/digest.js` (`sha512`), `src/protocol/chunks.js` (`prepareChunkSizes`, `fileSizeLen`, `authTagSize`), `src/protocol/encoding.js` (`concatBytes`), and `src/download.js` (`decryptReceivedChunk`). `download.js` directly imports `src/protocol/client.js` (for `decryptTransportChunk`). These transitively pull in `src/crypto/secretbox.js`, `src/crypto/keys.js`, and `src/crypto/padding.js`. None of these import `src/agent.ts` or `src/client.ts` — those pull in `node:http2` via dynamic import which would break Worker bundling. Vite tree-shakes the transitive deps automatically. Note: `download.js` → `protocol/client.js` → `crypto/keys.js` transitively pulls in `@noble/curves` (~50-80KB). This is unavoidable since `decryptTransportChunk` needs `dh` from `keys.js`. If Worker bundle size becomes a concern, `decryptReceivedChunk` could be refactored out of `download.js` into a separate module that doesn't import `protocol/client.js`. + +**ArrayBuffer → Uint8Array conversion:** All Transferable fields arrive in the Worker as `ArrayBuffer`. The Worker's message handler must wrap them before passing to library functions: `new Uint8Array(msg.data)` for encrypt, `new Uint8Array(msg.body)` for decryptAndStore. Non-transferred fields (`dhSecret`, `nonce`, `digest`, `chunkSizes`) arrive as their original types (`Uint8Array` / `number[]`) via structured clone. + +The Worker's encrypt handler calls the same functions as `encryptFileForUpload` in agent.ts (key/nonce generation → `encryptFile` → `sha512` → `prepareChunkSizes`). This is not reimplementation — it's calling the same library functions from a different entry point. + +**Libsodium init:** Both the Worker and the main thread must `await sodium.ready` before calling any crypto functions that use libsodium. The Worker does this once on startup before processing messages. The main thread needs it before `connectXFTP` (which uses libsodium via `verifyIdentityProof`) and before `downloadXFTPChunkRaw` (which uses libsodium via `generateX25519KeyPair` + `dh`). In practice, `main.ts` calls `await sodium.ready` at page load, before any XFTP calls. + +Encrypt (mirrors `encryptFileForUpload` in agent.ts): +1. Generate key (32B) + nonce (24B) via `crypto.getRandomValues` +2. `fileHdr = encodeFileHeader({fileName, fileExtra: null})` +3. `fileSize = BigInt(fileHdr.length + source.length)` +4. `payloadSize = Number(fileSize) + fileSizeLen + authTagSize` +5. `chunkSizes = prepareChunkSizes(payloadSize)` +6. `encSize = BigInt(chunkSizes.reduce((a, b) => a + b, 0))` +7. 
`encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize)` +8. `digest = sha512(encData)` — note: the `digest` field comment in `FileDescription` in `description.ts` says "SHA-256" but the actual hash is SHA-512 everywhere (`sha512` in agent.ts and download.ts). Fix the comment during implementation. +9. Open OPFS upload file via `createSyncAccessHandle`, write `encData`, flush, close handle. Null out `encData` reference. +10. Reopen the same OPFS file with `createSyncAccessHandle` as a persistent read handle (stored on the Worker module scope). This handle is used by all subsequent `readChunk` calls and closed on `cleanup`. +11. Post back `{digest, key, nonce, chunkSizes}` (no encData transfer — data stays in OPFS) + +readChunk: +- Use the persistent read handle: `handle.read(buf, {at: offset})` → return slice as transferable ArrayBuffer. OPFS allows only one `FileSystemSyncAccessHandle` per file; the persistent handle avoids per-call open/close overhead. + +decryptAndStoreChunk (removes transport encryption only — stored data is still file-level encrypted): +1. `decryptReceivedChunk(dhSecret, nonce, new Uint8Array(body), chunkDigest)` → transit-decrypted chunk data (still file-level encrypted — only the transport layer is removed). Argument order matches signature `(dhSecret, cbNonce, encData, expectedDigest)` from download.ts. `body` arrives as `ArrayBuffer` via Transferable and must be wrapped; `dhSecret`, `nonce`, `chunkDigest` arrive as `Uint8Array` via structured clone. +2. On first call, open the OPFS download temp file via `createSyncAccessHandle` and store as a persistent write handle. Record `{chunkNo, size: decrypted.length}` in an in-memory `chunkMeta: Map` — offset is the running sum of sizes for chunks stored so far (chunks may arrive out of order with `concurrency > 1`, so offset is assigned as `currentFileOffset`, then `currentFileOffset += size`) +3. Write decrypted chunk to the persistent handle at the recorded offset + +verifyAndDecrypt (mirrors size/digest checks in agent.ts `downloadFile`): +1. Close the persistent download write handle (flush first), then reopen as a read handle. Read each chunk from OPFS into a `Uint8Array[]` array, ordered by `chunkNo`: for each entry in `chunkMeta` sorted by `chunkNo`, `handle.read(buf, {at: offset})` with the recorded offset and size +2. Concatenate for verification: `combined = concatBytes(...chunks)` +3. Verify total size: `combined.length === params.size` +4. Verify SHA-512 digest: `sha512(combined)` matches `params.digest` +5. Decrypt: `decryptChunks(BigInt(params.size), chunks, params.key, params.nonce)` — `params.size` is the encrypted file size (`fd.size` = `sum(chunkSizes)` = `decryptChunks`' first param `encSize`). Called directly instead of via `processDownloadedFile` (which expects a full `FileDescription`). Pass the original `chunks` array (not `combined`), as `decryptChunks` handles concatenation internally. +6. Delete OPFS download temp file +7. Return `{header, content}` via transferable ArrayBuffer + +### 3.5 Browser requirements + +The page requires a modern browser with Web Worker and OPFS support: +- Chrome 102+, Firefox 114+, Safari 15.2+ (Workers + OPFS + ES module Workers — Firefox added module Worker support in 114) +- If Worker or OPFS is unavailable, the page shows an error message rather than falling back silently. + +No `DirectBackend` is needed — the page is browser-only, and tests run in vitest browser mode (real Chromium). 
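+
+A minimal sketch of that support check (the `showFatalError` helper is hypothetical; the real page can surface the error however it chooses):
+
+```typescript
+// Probe the two hard requirements before initializing any page state.
+async function checkBrowserSupport(): Promise<string | null> {
+  if (typeof Worker === "undefined") return "Web Workers are not supported in this browser"
+  if (!navigator.storage?.getDirectory) return "This browser does not support OPFS"
+  try {
+    await navigator.storage.getDirectory() // can reject, e.g. in some private-browsing modes
+  } catch {
+    return "OPFS is unavailable in this browsing mode"
+  }
+  return null
+}
+
+const unsupported = await checkBrowserSupport()
+if (unsupported) showFatalError(unsupported) // no silent fallback
+```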
The existing library tests (`test/browser.test.ts`) test the crypto/upload/download pipeline directly without Workers. + +## 4. Server Configuration + +### 4.1 Server lists + +`web/servers.json` — single source of truth for preset server addresses (imported by both `servers.ts` and `vite.config.ts`): + +```json +{ + "simplex": [ + "xftp://da1aH3nOT-9G8lV7bWamhxpDYdJ1xmW7j3JpGaDR5Ug=@xftp1.simplex.im", + "xftp://5vog2Imy1ExJB_7zDZrkV1KDWi96jYFyy9CL6fndBVw=@xftp2.simplex.im", + "xftp://PYa32DdYNFWi0uZZOprWQoQpIk5qyjRJ3EF7bVpbsn8=@xftp3.simplex.im", + "xftp://k_GgQl40UZVV0Y4BX9ZTyMVqX5ZewcLW0waQIl7AYDE=@xftp4.simplex.im", + "xftp://-bIo6o8wuVc4wpZkZD3tH-rCeYaeER_0lz1ffQcSJDs=@xftp5.simplex.im", + "xftp://6nSvtY9pJn6PXWTAIMNl95E1Kk1vD7FM2TeOA64CFLg=@xftp6.simplex.im" + ], + "flux": [ + "xftp://92Sctlc09vHl_nAqF2min88zKyjdYJ9mgxRCJns5K2U=@xftp1.simplexonflux.com", + "xftp://YBXy4f5zU1CEhnbbCzVWTNVNsaETcAGmYqGNxHntiE8=@xftp2.simplexonflux.com", + "xftp://ARQO74ZSvv2OrulRF3CdgwPz_AMy27r0phtLSq5b664=@xftp3.simplexonflux.com", + "xftp://ub2jmAa9U0uQCy90O-fSUNaYCj6sdhl49Jh3VpNXP58=@xftp4.simplexonflux.com", + "xftp://Rh19D5e4Eez37DEE9hAlXDB3gZa1BdFYJTPgJWPO9OI=@xftp5.simplexonflux.com", + "xftp://0AznwoyfX8Od9T_acp1QeeKtxUi676IBIiQjXVwbdyU=@xftp6.simplexonflux.com" + ] +} +``` + +`web/servers.ts`: + +```typescript +import {parseXFTPServer, type XFTPServer} from '../src/protocol/address.js' +import presets from './servers.json' + +declare const __XFTP_SERVERS__: string[] + +const serverAddresses: string[] = typeof __XFTP_SERVERS__ !== 'undefined' + ? __XFTP_SERVERS__ + : [...presets.simplex, ...presets.flux] + +export function getServers(): XFTPServer[] { + return serverAddresses.map(parseXFTPServer) +} + +export function pickRandomServer(servers: XFTPServer[]): XFTPServer { + return servers[Math.floor(Math.random() * servers.length)] +} +``` + +### 4.2 Build-time injection + +`vite.config.ts` defines `__XFTP_SERVERS__`: +- `mode === 'local'`: `["xftp://@localhost:7000"]` +- `mode === 'production'`: not defined → falls through to hardcoded list + +### 4.3 Assumption + +Production XFTP servers must have `[WEB]` section configured with a CA-signed certificate for browser TLS. Without this, browsers will reject the self-signed XFTP identity cert. The local test server uses `tests/fixtures/` certs which Chromium accepts via `ignoreHTTPSErrors`. + +## 5. Page Structure & UI + +### 5.1 Routing + +`main.ts` checks `window.location.hash` once on page load: +- Hash present → download mode +- Hash absent → upload mode + +No `hashchange` listener — the shareable link opens in a new tab. Simple page-load routing. + +### 5.2 Upload UI states + +1. **Landing**: Drag-drop zone centered, file picker button, size limit note +2. **Uploading**: Circular progress (canvas), percentage, cancel button +3. **Complete**: Shareable link (input + copy button), "Install SimpleX" CTA +4. **Error**: Error message + retry button. On server-unreachable, auto-retry with exponential backoff (1s, 2s, 4s, up to 3 attempts) before showing the error state. + +### 5.3 Download UI states + +1. **Ready**: Approximate file size displayed (encrypted size from `fd.size` or `fd.redirect.size` — see §7 step 2; file name is unavailable — it's inside the encrypted content), download button +2. **Downloading**: Circular progress, percentage +3. **Complete**: Browser save dialog triggered automatically +4. 
**Error**: Error message (expired, corrupted, unreachable)
+
+### 5.4 Security summary (RFC §7.4)
+
+Both upload-complete and download-ready states display a brief non-technical security summary:
+- Files are encrypted in the browser before upload — the server never sees file contents.
+- The link contains the decryption key in the hash fragment, which the browser never sends to any server.
+- For maximum security, use the SimpleX app.
+
+### 5.5 File expiry
+
+Display on upload-complete state: "Files are typically available for 48 hours." This is an approximation — actual expiry depends on each XFTP server's `[STORE_LOG]` retention configuration. The 48-hour figure matches the current preset server defaults.
+
+### 5.6 Styling
+
+Plain CSS, no framework. White background, centered content, responsive. Circular progress via `<canvas>` (arc drawing, percentage text in center).
+
+File size limit: 100MB. Displayed on upload page.
+
+### 5.7 CSP
+
+`index.html` includes a `<meta>` Content-Security-Policy tag with a build-time placeholder:
+
+```html
+<meta http-equiv="Content-Security-Policy"
+      content="default-src 'self'; connect-src __CSP_CONNECT_SRC__">
+```
+
+Vite's `transformIndexHtml` hook (in `vite.config.ts`) replaces `__CSP_CONNECT_SRC__` at build time with origins derived from the server list:
+- Local mode: `https://localhost:7000`
+- Production: `https://xftp1.simplex.im:443 https://xftp2.simplex.im:443 ...` (all 12 servers)
+
+## 6. Upload Flow
+
+`web/upload.ts`:
+
+1. User drops/picks file → `File` object
+2. Validate `file.size <= 100 * 1024 * 1024` — show error if exceeded
+3. Read file: `new Uint8Array(await file.arrayBuffer())` — note: after `backend.encrypt()` transfers the buffer to the Worker, `fileData` is detached (zero-length). Peak memory is ~2× file size (main thread holds original until transfer, Worker holds encrypted copy before OPFS write). Acceptable for the 100MB limit; do not raise the limit without considering memory implications.
+4. Create `CryptoBackend` via factory
+5. Create `XFTPClientAgent`
+6. `backend.encrypt(fileData, file.name, onProgress)` → `EncryptResult`
+   - Encryption progress shown on canvas (Worker posts progress messages)
+7. Pick one random server from configured list (V1: all chunks to same server)
+8. Call `uploadFile(agent, server, metadata, {onProgress, readChunk: (off, sz) => backend.readChunk(off, sz)})`:
+   - `metadata` = `{digest, key, nonce, chunkSizes}` from EncryptResult
+   - Network progress shown on canvas
+   - Returns `{rcvDescription, sndDescription, uri}`
+9. Construct full URL: `window.location.origin + window.location.pathname + '#' + uri`
+10. Display link, copy button
+11. Cleanup: `backend.cleanup()`, `closeXFTPAgent(agent)`
+
+**Cancel:** User can abort via cancel button. Sets an `AbortController` signal that:
+- Sends `{type: 'cleanup'}` to Worker
+- Closes the XFTPClientAgent (drops HTTP/2 connections)
+- Resets UI to landing state
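+
+A condensed sketch of this flow (happy path only; cancel handling and UI state transitions omitted; `newXFTPAgent` is the assumed agent constructor):
+
+```typescript
+// upload.ts orchestration sketch
+import {createCryptoBackend} from "./crypto-backend.js"
+import {getServers, pickRandomServer} from "./servers.js"
+import {newXFTPAgent, closeXFTPAgent, uploadFile} from "../src/agent.js"
+
+export async function handleFile(file: File, onProgress: (fraction: number) => void): Promise<string> {
+  if (file.size === 0) throw new Error("File is empty")
+  if (file.size > 100 * 1024 * 1024) throw new Error("File is too large (max 100 MB)")
+  const fileData = new Uint8Array(await file.arrayBuffer()) // detached once encrypt() transfers it
+  const backend = createCryptoBackend()
+  const agent = newXFTPAgent()
+  try {
+    // Splitting progress between encryption and upload halves is a UI choice, not part of the API.
+    const metadata = await backend.encrypt(fileData, file.name, (done, total) => onProgress(done / total / 2))
+    const server = pickRandomServer(getServers())
+    const {uri} = await uploadFile(agent, server, metadata, {
+      onProgress: (done, total) => onProgress(0.5 + done / total / 2),
+      readChunk: (off, sz) => backend.readChunk(off, sz),
+    })
+    return window.location.origin + window.location.pathname + "#" + uri
+  } finally {
+    await backend.cleanup()
+    await closeXFTPAgent(agent)
+  }
+}
+```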
+
+## 7. Download Flow
+
+`web/download.ts`:
+
+1. Parse `window.location.hash.slice(1)` → `decodeDescriptionURI(fragment)` → `FileDescription`
+2. Display file size (`fd.size` bytes, formatted human-readable). Note: `fd.size` is the encrypted size (slightly larger than plaintext due to padding + auth tag). The plaintext size is not available until decryption — display it as an approximate file size. If `fd.redirect !== null`, size comes from `fd.redirect.size` (which is the inner encrypted size).
+3. User clicks "Download"
+4. Create `CryptoBackend` and `XFTPClientAgent`
+5. Call `downloadFileRaw(agent, fd, onRawChunk, {onProgress, concurrency: 3})`:
+   - `onRawChunk` forwards each raw chunk to the Worker: `backend.decryptAndStoreChunk(raw.dhSecret, raw.nonce, raw.body, raw.digest, raw.chunkNo)`
+   - `downloadFileRaw` handles redirect resolution internally (see §7.1), parallel downloads, and connection pooling
+   - Returns the resolved `FileDescription` (inner fd for redirect case, original fd otherwise)
+6. `backend.verifyAndDecrypt({size: resolvedFd.size, digest: resolvedFd.digest, key: resolvedFd.key, nonce: resolvedFd.nonce})` → `{header, content}`
+   - Verifies size + SHA-512 digest + file-level decryption inside Worker. Only the four needed fields are sent — private replica keys stay on the main thread.
+7. ACK: `ackFileChunks(agent, resolvedFd)` — best-effort, after verification succeeds
+8. Sanitize `header.fileName` before use (see the sketch after this list): strip path separators (`/`, `\`), replace null/control characters (U+0000-U+001F, U+007F), strip Unicode bidi override characters (U+202A-U+202E, U+2066-U+2069 — prevents `doc.pdf.exe` appearing as `doc.exe.pdf`), limit length to 255 chars. The filename is user-controlled (set by the uploader) and arrives via decrypted content. Then trigger browser save: `new Blob([content])` → `<a download>` click
+9. Cleanup: `backend.cleanup()`, `closeXFTPAgent(agent)`
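+
+A sketch of the step 8 sanitization (the function name is an assumption; the rules are exactly those listed above):
+
+```typescript
+// Sanitize the uploader-controlled filename before handing it to the save dialog.
+export function sanitizeFileName(name: string): string {
+  const cleaned = name
+    .replace(/[\/\\]/g, "_")                       // path separators
+    .replace(/[\u0000-\u001f\u007f]/g, "")         // null and control characters
+    .replace(/[\u202a-\u202e\u2066-\u2069]/g, "")  // bidi override characters
+  return (cleaned || "download").slice(0, 255)     // non-empty, capped at 255 chars
+}
+```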
+
+### 7.1 Redirect handling
+
+Handled inside `downloadFileRaw` in agent.ts — the web page doesn't see it. When `fd.redirect !== null`:
+
+1. Download redirect chunks via `downloadXFTPChunk` (parallel, same as regular chunks; not the raw variant — see §9.5)
+2. Verify + file-level decrypt on main thread (redirect data is always small — a few KB of YAML, so main thread decryption is fine)
+3. Parse YAML → inner `FileDescription`, validate against `fd.redirect.{size, digest}`
+4. ACK redirect chunks (best-effort)
+5. Continue downloading inner description's chunks, calling `onRawChunk` for each
+
+### 7.2 Architecture note: download refactoring
+
+Both upload and download use `agent.ts` for orchestration. The key difference is where the crypto/network split happens:
+
+- **Upload**: agent.ts reads encrypted chunks from the Worker via `readChunk` callback, sends them over the network.
+- **Download**: agent.ts receives raw encrypted responses from the network via `downloadXFTPChunkRaw` (DH key exchange + network only, no decryption), passes them to the web page via `onRawChunk` callback, which routes them to the Worker for transit decryption.
+
+This split keeps all expensive crypto off the main thread. Transit decryption uses a custom JS Salsa20 implementation (`xorKeystream` in secretbox.ts) that would block the UI for ~50-200ms on a 4MB chunk. File-level decryption (`decryptChunks`) is similarly expensive. Both happen in the Worker.
+
+The cheap operations stay on the main thread: DH key exchange (`generateX25519KeyPair` + `dh` — ~1ms via libsodium WASM), XFTP command encoding/decoding, connection management.
+
+## 8. Build & Dev Setup
+
+### 8.1 vite.config.ts (new, separate from vitest.config.ts)
+
+```typescript
+import {defineConfig, type Plugin} from 'vite'
+import {readFileSync} from 'fs'
+import {createHash} from 'crypto'
+import presets from './web/servers.json'
+
+function parseHost(addr: string): string {
+  const m = addr.match(/@(.+)$/)
+  if (!m) throw new Error('bad server address: ' + addr)
+  const host = m[1].split(',')[0]
+  return host.includes(':') ? host : host + ':443'
+}
+
+function cspPlugin(servers: string[]): Plugin {
+  const origins = servers.map(s => 'https://' + parseHost(s)).join(' ')
+  return {
+    name: 'csp-connect-src',
+    transformIndexHtml: {
+      order: 'pre',
+      handler(html, ctx) {
+        if (ctx.server) {
+          // Dev mode: remove CSP meta tag entirely — Vite HMR needs inline scripts
+          return html.replace(/<meta[^>]*?Content-Security-Policy[\s\S]*?>/i, '')
+        }
+        return html.replace('__CSP_CONNECT_SRC__', origins)
+      }
+    }
+  }
+}
+
+export default defineConfig(({mode}) => {
+  const define: Record<string, string> = {}
+  let servers: string[]
+
+  if (mode === 'local') {
+    const pem = readFileSync('../tests/fixtures/ca.crt', 'utf-8')
+    const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64')
+    const fp = createHash('sha256').update(der).digest('base64')
+      .replace(/\+/g, '-').replace(/\//g, '_')
+    servers = [`xftp://${fp}@localhost:7000`]
+    define['__XFTP_SERVERS__'] = JSON.stringify(servers)
+  } else {
+    servers = [...presets.simplex, ...presets.flux]
+  }
+
+  return {
+    root: 'web',
+    build: {outDir: '../dist-web'},
+    define,
+    worker: {format: 'es'},
+    plugins: [cspPlugin(servers)],
+  }
+})
+```
+
+### 8.2 package.json scripts
+
+```json
+"dev": "vite --mode local",
+"build:local": "vite build --mode local",
+"build:prod": "vite build --mode production",
+"preview": "vite preview",
+"check:web": "tsc -p tsconfig.web.json --noEmit && tsc -p tsconfig.worker.json --noEmit"
+```
+
+Note: `check:web` type-checks `src/` twice (once per config) — acceptable for this small library.
+
+Add `vite` as an explicit devDependency (`^6.0.0` — matching the version vitest 3.x depends on transitively). Relying on transitive resolution is fragile across package managers.
+
+### 8.3 TypeScript configuration
+
+The existing `tsconfig.json` has `rootDir: "src"` and `include: ["src/**/*.ts"]` — this is for library compilation only (output to `dist/`). Vite handles `web/` TypeScript compilation independently via esbuild, so the main tsconfig is unchanged. `web/*.ts` files import from `../src/*.js` using relative paths.
+
+Add two tsconfigs for `web/` type-checking — split by environment to avoid type pollution between DOM and WebWorker globals:
+
+`tsconfig.web.json` — main-thread files (DOM globals: `document`, `window`, etc.):
+
+```json
+{
+  "extends": "./tsconfig.json",
+  "compilerOptions": {
+    "rootDir": ".",
+    "noEmit": true,
+    "types": [],
+    "moduleResolution": "bundler",
+    "lib": ["ES2022", "DOM"]
+  },
+  "include": ["web/**/*.ts", "src/**/*.ts"],
+  "exclude": ["web/crypto.worker.ts"]
+}
+```
+
+`tsconfig.worker.json` — Worker file (`self`, `FileSystemSyncAccessHandle`, etc.):
+
+```json
+{
+  "extends": "./tsconfig.json",
+  "compilerOptions": {
+    "rootDir": ".",
+    "noEmit": true,
+    "types": [],
+    "moduleResolution": "bundler",
+    "lib": ["ES2022", "WebWorker"]
+  },
+  "include": ["web/crypto.worker.ts", "src/**/*.ts"]
+}
+```
+
+Both configs set `"types": []` to prevent auto-inclusion of `@types/node`, which would otherwise pollute the DOM/WebWorker environments with Node.js globals (`process`, `Buffer`, etc.), and `"moduleResolution": "bundler"` for Vite-compatible resolution (JSON imports, `.js` extension mapping). The base config's `"moduleResolution": "node"` would cause false type errors on `import ... from './servers.json'`. This means `src/client.ts`'s dynamic `import("node:http2")` will produce a type error in these configs.
This is acceptable — `src/client.ts` provides `createNodeTransport`, which is never used in browser code (Vite tree-shakes it out), and full `src/` type-checking is handled by the base `tsconfig.json`. If the error is distracting, add `src/client.ts` to both configs' `exclude` arrays.
+
+Both extend the library tsconfig (inheriting `strict`, `module`, etc.) and include `src/**/*.ts` so imports from `../src/*.js` resolve. `"noEmit": true` means they're only used for type-checking — Vite handles actual compilation. The inherited `"exclude": ["node_modules", "dist", "test"]` intentionally excludes `test/` — test files are type-checked by their own vitest/playwright configs, not by `check:web`.
+
+### 8.4 Dev workflow
+
+`npm run dev` → Vite dev server at `localhost:5173`, configured for the local test server. Start `xftp-server` on port 7000 separately (or via the existing globalSetup).
+
+Note: The CSP meta tag's `default-src 'self'` blocks Vite's injected HMR inline scripts in dev mode. The `cspPlugin` handles this by removing the entire CSP `<meta>` tag in serve mode (dev server), so HMR works without restrictions. Production builds always have the correct CSP.
+
+## 9. Library Changes (agent.ts + client.ts)
+
+Changes to support the web page: upload `readChunk` callback, download `onRawChunk` callback with parallel chunk downloads.
+
+### 9.1 Type changes
+
+Split the existing `EncryptedFileInfo` (which currently has `encData`, `digest`, `key`, `nonce`, `chunkSizes` as direct fields) into a metadata-only base and an extension:
+
+```typescript
+// Metadata-only variant (no encData — data lives in Worker/OPFS)
+export interface EncryptedFileMetadata {
+  digest: Uint8Array
+  key: Uint8Array
+  nonce: Uint8Array
+  chunkSizes: number[]
+}
+
+// Full variant (existing, extends metadata with data)
+export interface EncryptedFileInfo extends EncryptedFileMetadata {
+  encData: Uint8Array
+}
+```
+
+### 9.2 uploadFile signature change
+
+Replace positional optional params with an options bag. Add optional `readChunk`. When provided, `encrypted.encData` is not accessed.
+
+```typescript
+export interface UploadOptions {
+  onProgress?: (uploaded: number, total: number) => void
+  redirectThreshold?: number
+  readChunk?: (offset: number, size: number) => Promise<Uint8Array>
+}
+
+export async function uploadFile(
+  agent: XFTPClientAgent,
+  server: XFTPServer,
+  encrypted: EncryptedFileMetadata,
+  options?: UploadOptions
+): Promise<{rcvDescription: FileDescription, sndDescription: FileDescription, uri: string}>
+```
+
+Inside `uploadFile`:
+- Chunk read: if `options?.readChunk` is provided, use it. Otherwise, verify `'encData' in encrypted` at runtime (throws `"uploadFile: readChunk required when encData is absent"` if missing), then use `(off, sz) => Promise.resolve((encrypted as EncryptedFileInfo).encData.subarray(off, off + sz))`. This guards against calling `uploadFile` with `EncryptedFileMetadata` but no `readChunk`. For each chunk, call `readChunk(offset, size)` once and use the returned `Uint8Array` for both `getChunkDigest(chunkData)` and `uploadXFTPChunk(..., chunkData)` — do not call `readChunk` twice per chunk (see the sketch after this list).
+- Progress total: `const total = encrypted.chunkSizes.reduce((a, b) => a + b, 0)` — replaces `encrypted.encData.length` (line 129) since `EncryptedFileMetadata` has no `encData`. The values are identical: `encData.length === sum(chunkSizes)`.
+- `buildDescription` parameter type: change from `EncryptedFileInfo` to `EncryptedFileMetadata` — it only accesses `chunkSizes`, `digest`, `key`, `nonce` (not `encData`).
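+
+A sketch of that chunk-source selection and single-read loop (the surrounding `uploadFile` internals are elided):
+
+```typescript
+// Inside uploadFile: choose one chunk reader for the whole upload.
+const readChunk: (offset: number, size: number) => Promise<Uint8Array> =
+  options?.readChunk ??
+  ((): ((offset: number, size: number) => Promise<Uint8Array>) => {
+    if (!("encData" in encrypted)) throw new Error("uploadFile: readChunk required when encData is absent")
+    const data = (encrypted as EncryptedFileInfo).encData
+    return (off, sz) => Promise.resolve(data.subarray(off, off + sz))
+  })()
+
+let offset = 0
+for (const size of encrypted.chunkSizes) {
+  const chunkData = await readChunk(offset, size) // read once per chunk
+  // ...use chunkData for both getChunkDigest(chunkData) and uploadXFTPChunk(..., chunkData)
+  offset += size
+}
+```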
+
+`uploadRedirectDescription` (internal) is unchanged — redirect descriptions are always small and created in-memory by `encryptFileForUpload`.
+
+### 9.3 Backward compatibility
+
+The signature change from positional params `(agent, server, encrypted, onProgress?, redirectThreshold?)` to `(agent, server, encrypted, options?)` is a breaking change for callers that pass `onProgress` or `redirectThreshold`. In practice, the only callers are the browser test (which passes no options — no change needed) and the web page (new code). `EncryptedFileInfo` extends `EncryptedFileMetadata`, so existing callers that pass `EncryptedFileInfo` work without change.
+
+### 9.4 client.ts: downloadXFTPChunkRaw
+
+Split `downloadXFTPChunk` at the network/crypto boundary. The new function does DH key exchange and network I/O but skips transit decryption:
+
+```typescript
+export interface RawChunkResponse {
+  dhSecret: Uint8Array
+  nonce: Uint8Array
+  body: Uint8Array
+}
+
+export async function downloadXFTPChunkRaw(
+  c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array
+): Promise<RawChunkResponse> {
+  const {publicKey, privateKey} = generateX25519KeyPair()
+  const cmd = encodeFGET(encodePubKeyX25519(publicKey))
+  const {response, body} = await sendXFTPCommand(c, rpKey, fId, cmd)
+  if (response.type !== "FRFile") throw new Error("unexpected response: " + response.type)
+  const dhSecret = dh(response.rcvDhKey, privateKey)
+  return {dhSecret, nonce: response.nonce, body}
+}
+```
+
+`RawChunkResponse` contains only what client.ts produces (DH secret, nonce, encrypted body). The chunk metadata (`chunkNo`, `digest`) is added by agent.ts when constructing `RawDownloadedChunk` (see §9.5).
+
+The existing `downloadXFTPChunk` is refactored to call `downloadXFTPChunkRaw` + `decryptReceivedChunk`:
+
+```typescript
+export async function downloadXFTPChunk(
+  c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array, digest?: Uint8Array
+): Promise<Uint8Array> {
+  const {dhSecret, nonce, body} = await downloadXFTPChunkRaw(c, rpKey, fId)
+  return decryptReceivedChunk(dhSecret, nonce, body, digest ?? null)
+}
+```
+
+### 9.5 agent.ts: downloadFileRaw, ackFileChunks, RawDownloadedChunk
+
+New type combining client.ts's `RawChunkResponse` with chunk metadata from agent.ts:
+
+```typescript
+export interface RawDownloadedChunk {
+  chunkNo: number
+  dhSecret: Uint8Array
+  nonce: Uint8Array
+  body: Uint8Array
+  digest: Uint8Array
+}
+```
+
+New function providing download orchestration with a raw chunk callback. Handles connection pooling, parallel downloads, redirect resolution, and progress. Does **not** ACK — the caller ACKs after verification.
+
+```typescript
+export interface DownloadRawOptions {
+  onProgress?: (downloaded: number, total: number) => void
+  concurrency?: number // max parallel chunk downloads, default 1
+}
+
+export async function downloadFileRaw(
+  agent: XFTPClientAgent,
+  fd: FileDescription,
+  onRawChunk: (chunk: RawDownloadedChunk) => Promise<void>,
+  options?: DownloadRawOptions
+): Promise<FileDescription>
+```
+
+Returns the resolved `FileDescription` — for redirect files this is the inner fd, for non-redirect files this is the original fd. The caller uses this for verification and ACK.
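+
+Sketched from the web page's perspective (§7), the intended caller pattern is: download raw, verify in the Worker, and only then ACK:
+
+```typescript
+// Caller-side contract of downloadFileRaw: ACK happens only after verification.
+const resolvedFd = await downloadFileRaw(
+  agent,
+  fd,
+  (raw) => backend.decryptAndStoreChunk(raw.dhSecret, raw.nonce, raw.body, raw.digest, raw.chunkNo),
+  {onProgress, concurrency: 3}
+)
+const {header, content} = await backend.verifyAndDecrypt({
+  size: resolvedFd.size,
+  digest: resolvedFd.digest,
+  key: resolvedFd.key,
+  nonce: resolvedFd.nonce,
+})
+await ackFileChunks(agent, resolvedFd) // best-effort; size, digest and decryption all passed
+```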
+
+Internal structure:
+
+1. Validate `fd` via `validateFileDescription` (may double-validate if caller already validated via `decodeDescriptionURI` — harmless)
+2. If `fd.redirect !== null`: resolve redirect on main thread (redirect data is small):
+   a. Download redirect chunks via `downloadXFTPChunk` (not raw — main thread decryption is fine for a few KB)
+   b. Verify size + digest, `processDownloadedFile` → YAML bytes
+   c. Parse inner `FileDescription`, validate against `fd.redirect.{size, digest}`
+   d. ACK redirect chunks (best-effort — redirect chunks are small and separate from the file chunks)
+   e. Replace `fd` with inner description
+3. Pre-connect: call `getXFTPServerClient(agent, server)` for each unique server before launching concurrent workers. This ensures the client connection exists in the agent's map, avoiding a race condition where multiple concurrent workers all see the client as missing and each call `connectXFTP` independently (leaking all but the last connection). Known limitation: if a connection drops mid-download and multiple workers attempt reconnection simultaneously, the same TOCTOU race reappears. This is a pre-existing issue in `getXFTPServerClient`; a proper fix (per-key connection promise) is out of scope for this plan but should be tracked for follow-up.
+4. Download file chunks in parallel (concurrency-limited via sliding window):
+   - Create a queue of chunk indices `[0, 1, ..., N-1]`. Launch `min(concurrency, N)` async workers, each pulling the next index from the queue until empty. Each worker loops: pull index → derive key → `getXFTPServerClient` → `downloadXFTPChunkRaw` → `await onRawChunk(...)` → update progress → next index. `await Promise.all(workers)` to wait for completion.
+   - For each chunk: derive key (`decodePrivKeyEd25519` → `ed25519KeyPairFromSeed`), get client (`getXFTPServerClient`), call `downloadXFTPChunkRaw`, `await onRawChunk(...)` with result + `chunkNo` + `chunk.digest`
+   - Each concurrency slot awaits its `onRawChunk` before starting the next download on that slot. With `concurrency > 1`, multiple `onRawChunk` calls may be in-flight concurrently (one per slot). The Worker handles this correctly — messages are queued and processed sequentially.
+   - Update progress after each chunk: `downloaded += chunk.chunkSize; onProgress?.(downloaded, resolvedFd.size)` — both values use encrypted sizes for consistency
+5. Return the resolved `fd`
+
+New helper for ACKing after verification:
+
+```typescript
+export async function ackFileChunks(
+  agent: XFTPClientAgent, fd: FileDescription
+): Promise<void> {
+  for (const chunk of fd.chunks) {
+    const replica = chunk.replicas[0]
+    if (!replica) continue
+    try {
+      const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server))
+      const seed = decodePrivKeyEd25519(replica.replicaKey)
+      const kp = ed25519KeyPairFromSeed(seed)
+      await ackXFTPChunk(client, kp.privateKey, replica.replicaId)
+    } catch (_) {}
+  }
+}
+```
+
+The existing `downloadFile` is refactored to use `downloadFileRaw` internally:
+
+```typescript
+export async function downloadFile(
+  agent: XFTPClientAgent,
+  fd: FileDescription,
+  onProgress?: (downloaded: number, total: number) => void
+): Promise<{header: FileHeader, content: Uint8Array}> {
+  const chunks: Uint8Array[] = []
+  const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => {
+    chunks[raw.chunkNo - 1] = decryptReceivedChunk(
+      raw.dhSecret, raw.nonce, raw.body, raw.digest
+    )
+  }, {onProgress})
+  // verify + file-level decrypt using resolvedFd (inner fd for redirect case)
+  const combined = chunks.length === 1 ?
chunks[0] : concatBytes(...chunks) + if (combined.length !== resolvedFd.size) throw new Error("downloadFile: file size mismatch") + const digest = sha512(combined) + if (!digestEqual(digest, resolvedFd.digest)) throw new Error("downloadFile: file digest mismatch") + // processDownloadedFile re-concatenates chunks internally — this mirrors the + // existing downloadFile pattern (verify on concatenated data, then pass chunks + // array to decryptChunks which concatenates again). Acceptable overhead for + // correctness: verification must happen on transit-decrypted data before + // file-level decryption transforms it. + const result = processDownloadedFile(resolvedFd, chunks) + await ackFileChunks(agent, resolvedFd) + return result +} +``` + +Existing callers retain serial behavior (`concurrency` defaults to 1). The web page opts into parallelism by passing `concurrency: 3`. The browser test (`test/browser.test.ts`) continues to work unchanged. The chunks array is initialized empty (`[]`) and populated by sparse index assignment (`chunks[raw.chunkNo - 1] = ...`), so it correctly handles both redirect and non-redirect cases regardless of the outer fd's chunk count. `digestEqual` is an existing module-private helper in agent.ts (line 327) that performs constant-time byte comparison. + +### 9.6 Backward compatibility (download) + +`downloadFile` signature is unchanged — existing callers are unaffected. The refactoring adds `downloadFileRaw`, `ackFileChunks`, and `RawDownloadedChunk` as new exports from agent.ts, and `downloadXFTPChunkRaw` + `RawChunkResponse` as new exports from client.ts. + +## 10. Testing + +### 10.1 Existing tests (unchanged) + +- `npm run test:browser` — vitest browser round-trip (library-level) +- `cabal test --test-option='--match=/XFTP Web Client/'` — Haskell per-function tests + +### 10.2 New: page E2E test + +Add `test/page.spec.ts` using `@playwright/test` (not vitest browser mode — vitest tests run IN the browser and can't control page navigation; Playwright tests run in Node.js and control the browser). Add `@playwright/test` as a devDependency. + +Add `playwright.config.ts` at the project root (`xftp-web/`): +- `webServer: { command: 'vite build --mode local && vite preview', url: 'http://localhost:4173', reuseExistingServer: !process.env.CI }` — the `url` property tells Playwright to wait until the preview server is ready before running tests +- `use.ignoreHTTPSErrors: true` (test server uses self-signed cert) +- `use.launchOptions: { args: ['--ignore-certificate-errors'] }` — required because Playwright's `ignoreHTTPSErrors` only affects page navigation, not `fetch()` calls from in-page JavaScript. Without this flag, the page's `createBrowserTransport` fetch to `https://localhost:7000` would fail TLS validation. +- `globalSetup`: `'./test/globalSetup.ts'` (starts xftp-server, shared with vitest) + +```typescript +import {test, expect} from '@playwright/test' + +test('page upload + download round-trip', async ({page}) => { + await page.goto(PAGE_URL) + // Set file input via page.setInputFiles() + // Wait for upload link to appear: page.waitForSelector('[data-testid="share-link"]') + // Extract hash from link text + // Navigate to PAGE_URL + '#' + hash + // Wait for download complete state + // Verify file was offered for save (check download event) +}) +``` + +Add script: `"test:page": "playwright test test/page.spec.ts"` + +This tests the real bundle including Worker loading, OPFS, and CSP. 
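+
+For concreteness, a sketch of the config described by the bullets above (`testDir`/`testMatch` values are assumptions):
+
+```typescript
+// playwright.config.ts (sketch)
+import {defineConfig} from '@playwright/test'
+
+export default defineConfig({
+  testDir: './test',
+  testMatch: 'page.spec.ts',
+  globalSetup: './test/globalSetup.ts',
+  webServer: {
+    command: 'vite build --mode local && vite preview',
+    url: 'http://localhost:4173',
+    reuseExistingServer: !process.env.CI,
+  },
+  use: {
+    ignoreHTTPSErrors: true,
+    // ignoreHTTPSErrors only covers navigation; in-page fetch() needs the browser flag
+    launchOptions: {args: ['--ignore-certificate-errors']},
+  },
+})
+```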
The existing `test/browser.test.ts` continues to test the library-level pipeline (vitest browser mode, no Workers). + +### 10.3 Manual testing + +`npm run dev` → open `localhost:5173` in browser → drag file → get link → open link in new tab → download. Requires xftp-server running on port 7000 (local mode). + +## 11. Files + +**Create:** +- `xftp-web/web/index.html` — page entry point (includes CSP meta tag) +- `xftp-web/web/main.ts` — router + libsodium init +- `xftp-web/web/upload.ts` — upload UI + orchestration +- `xftp-web/web/download.ts` — download UI + orchestration +- `xftp-web/web/progress.ts` — circular progress canvas component +- `xftp-web/web/servers.json` — preset server addresses (shared by servers.ts and vite.config.ts) +- `xftp-web/web/servers.ts` — server configuration (imports servers.json) +- `xftp-web/web/crypto-backend.ts` — CryptoBackend interface + WorkerBackend + factory +- `xftp-web/web/crypto.worker.ts` — Web Worker implementation +- `xftp-web/web/style.css` — styles +- `xftp-web/vite.config.ts` — page build config (CSP generation, server list) +- `xftp-web/tsconfig.web.json` — IDE/CI type-checking for `web/` main-thread files (DOM) +- `xftp-web/tsconfig.worker.json` — IDE/CI type-checking for `web/crypto.worker.ts` (WebWorker) +- `xftp-web/playwright.config.ts` — Playwright E2E test config (webServer, globalSetup) +- `xftp-web/test/page.spec.ts` — page E2E test (Playwright) + +**Modify:** +- `xftp-web/src/agent.ts` — add `EncryptedFileMetadata` type, `uploadFile` options bag with `readChunk`, `downloadFileRaw` with `onRawChunk` callback + parallel downloads, `ackFileChunks`, `RawDownloadedChunk` type, refactor `downloadFile` on top of `downloadFileRaw`, add `import {decryptReceivedChunk} from "./download.js"` (needed by refactored `downloadFile`) +- `xftp-web/src/client.ts` — add `downloadXFTPChunkRaw`, `RawChunkResponse` type, refactor `downloadXFTPChunk` to use raw variant +- `xftp-web/package.json` — add dev/build/check:web/test:page scripts, add `vite` + `@playwright/test` devDeps +- `xftp-web/src/protocol/description.ts` — fix stale "SHA-256" comment on `FileDescription.digest` to "SHA-512" +- `xftp-web/.gitignore` — add `dist-web/` + +## 12. Implementation Order + +1. **Library refactoring** — `client.ts`: add `downloadXFTPChunkRaw`; `agent.ts`: add `downloadFileRaw` + parallel downloads, `uploadFile` options bag with `readChunk`; refactor existing `downloadFile` on top of `downloadFileRaw`. Run existing tests to verify no regressions. +2. **Vite config + HTML shell** — `vite.config.ts`, `index.html`, `main.ts`, verify dev server works +3. **Server config** — `servers.ts` with both local and production server lists +4. **CryptoBackend + Worker** — interface, WorkerBackend, Worker implementation, OPFS logic +5. **Upload flow** — `upload.ts` with drag-drop, encrypt via Worker, upload via agent, show link +6. **Download flow** — `download.ts` with URL parsing, download via agent `downloadFileRaw`, Worker decrypt, browser save +7. **Progress component** — `progress.ts` canvas drawing +8. **Styling** — `style.css` +9. **Testing** — page E2E test, manual browser verification +10. **Build scripts** — `build:local`, `build:prod` in package.json diff --git a/rfcs/2026-02-05-xftp-web-e2e-tests.md b/rfcs/2026-02-05-xftp-web-e2e-tests.md new file mode 100644 index 0000000000..355f76df69 --- /dev/null +++ b/rfcs/2026-02-05-xftp-web-e2e-tests.md @@ -0,0 +1,1196 @@ +# XFTP Web Page E2E Tests Plan + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. 
[Test Infrastructure](#2-test-infrastructure) +3. [Upload Flow Tests](#3-upload-flow-tests) +4. [Download Flow Tests](#4-download-flow-tests) +5. [Edge Cases](#5-edge-cases) +6. [Implementation Order](#6-implementation-order) +7. [Test Utilities](#7-test-utilities) + +--- + +## 1. Executive Summary + +This document specifies comprehensive Playwright E2E tests for the XFTP web page. The existing test (`page.spec.ts`) performs a basic upload/download round-trip. This plan extends coverage to: + +- **Upload flow**: File selection (picker + drag-drop), validation, progress, cancellation, link sharing, error handling +- **Download flow**: Invalid link handling, download button, progress, file save, error states +- **Edge cases**: Boundary file sizes, special characters, network failures, multi-chunk files with redirect, UI information display + +**Key constraints**: +- Tests run against a local XFTP server (started via `globalSetup.ts`) +- Server port is dynamic (read from `/tmp/xftp-test-server.port`) +- Browser uses `--ignore-certificate-errors` for self-signed certs +- OPFS and Web Workers are required (Chromium supports both) + +**Test file location**: `/code/simplexmq/xftp-web/test/page.spec.ts` + +--- + +## 2. Test Infrastructure + +### 2.1 Current Setup + +``` +xftp-web/ +├── playwright.config.ts # Playwright config (webServer, globalSetup) +├── test/ +│ ├── globalSetup.ts # Starts xftp-server, writes port to PORT_FILE +│ └── page.spec.ts # E2E tests (to be extended) +``` + +### 2.2 Prerequisites + +- `globalSetup.ts` starts the XFTP server and writes port to `PORT_FILE` +- Tests must read the port dynamically: `readFileSync(PORT_FILE, 'utf-8').trim()` +- Vite builds and serves the page at `http://localhost:4173` + +### 2.3 Test Helpers to Add + +```typescript +// Helper: read server port from file +function getServerPort(): number { + return parseInt(readFileSync(PORT_FILE, 'utf-8').trim(), 10) +} + +// Helper: create test file buffer +function createTestFile(size: number, pattern?: string): Buffer { + if (pattern) { + const repeated = pattern.repeat(Math.ceil(size / pattern.length)) + return Buffer.from(repeated.slice(0, size), 'utf-8') + } + return Buffer.alloc(size, 0x41) // 'A' repeated +} + +// Helper: wait for element text to match +async function waitForText(page: Page, selector: string, text: string, timeout = 30000) { + await expect(page.locator(selector)).toContainText(text, {timeout}) +} +``` + +--- + +## 3. Upload Flow Tests + +### 3.1 File Selection - File Picker Button + +**Test ID**: `upload-file-picker` + +**Purpose**: Verify file selection via the "Choose file" button triggers upload. + +**Steps**: +1. Navigate to page +2. Verify drop zone visible with "Choose file" button +3. Set file via hidden input `#file-input` +4. Verify upload progress stage becomes visible +5. 
Wait for share link to appear + +**Assertions**: +- Drop zone hidden after file selection +- Progress stage visible during upload +- Share link contains valid URL with hash fragment + +```typescript +test('upload via file picker button', async ({page}) => { + await page.goto(PAGE_URL) + await expect(page.locator('#drop-zone')).toBeVisible() + await expect(page.locator('label[for="file-input"]')).toContainText('Choose file') + + const buffer = Buffer.from('test content ' + Date.now(), 'utf-8') + await page.locator('#file-input').setInputFiles({ + name: 'picker-test.txt', + mimeType: 'text/plain', + buffer + }) + + await expect(page.locator('#upload-progress')).toBeVisible() + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + + const link = await page.locator('[data-testid="share-link"]').inputValue() + expect(link).toMatch(/^http:\/\/localhost:\d+\/#/) +}) +``` + +### 3.2 File Selection - Drag and Drop + +**Test ID**: `upload-drag-drop` + +**Purpose**: Verify drag-and-drop file selection works correctly. + +**Steps**: +1. Navigate to page +2. Simulate dragover event on drop zone +3. Verify drop zone shows drag-over state +4. Simulate drop event with file +5. Verify upload starts + +**Assertions**: +- Drop zone gets `drag-over` class on dragover +- Drop zone loses `drag-over` class on drop +- Upload progress visible after drop + +```typescript +test('upload via drag and drop', async ({page}) => { + await page.goto(PAGE_URL) + const dropZone = page.locator('#drop-zone') + await expect(dropZone).toBeVisible() + + // Create DataTransfer with file + const buffer = Buffer.from('drag drop test ' + Date.now(), 'utf-8') + + // Playwright's setInputFiles doesn't support drag-drop directly, + // but the file input handles both cases - use input as proxy + await page.locator('#file-input').setInputFiles({ + name: 'dragdrop-test.txt', + mimeType: 'text/plain', + buffer + }) + + await expect(page.locator('#upload-progress')).toBeVisible() + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) +}) +``` + +**Note**: True drag-drop testing requires `page.dispatchEvent()` with a DataTransfer mock. The file input path covers the same code path after event handling. + +### 3.3 File Size Validation - Too Large + +**Test ID**: `upload-file-too-large` + +**Purpose**: Verify files exceeding 100MB are rejected with error message. + +**Steps**: +1. Navigate to page +2. Set file larger than 100MB via input +3. 
Verify error stage shown immediately (no upload attempt) + +**Assertions**: +- Error message contains "too large" and file size +- Error message mentions 100 MB limit +- Retry button visible + +```typescript +test('upload rejects file over 100MB', async ({page}) => { + await page.goto(PAGE_URL) + + // Use page.evaluate to create a file with the desired size + // without actually allocating 100MB in the test process + await page.evaluate(() => { + const input = document.getElementById('file-input') as HTMLInputElement + const mockFile = new File([new ArrayBuffer(100 * 1024 * 1024 + 1)], 'large.bin') + const dt = new DataTransfer() + dt.items.add(mockFile) + input.files = dt.files + input.dispatchEvent(new Event('change', {bubbles: true})) + }) + + await expect(page.locator('#upload-error')).toBeVisible() + await expect(page.locator('#error-msg')).toContainText('too large') + await expect(page.locator('#error-msg')).toContainText('100 MB') +}) +``` + +### 3.4 File Size Validation - Empty File + +**Test ID**: `upload-file-empty` + +**Purpose**: Verify empty files are rejected. + +**Steps**: +1. Navigate to page +2. Set empty file via input +3. Verify error message shown + +**Assertions**: +- Error message contains "empty" + +```typescript +test('upload rejects empty file', async ({page}) => { + await page.goto(PAGE_URL) + + await page.locator('#file-input').setInputFiles({ + name: 'empty.txt', + mimeType: 'text/plain', + buffer: Buffer.alloc(0) + }) + + await expect(page.locator('#upload-error')).toBeVisible() + await expect(page.locator('#error-msg')).toContainText('empty') +}) +``` + +### 3.5 Progress Display + +**Test ID**: `upload-progress-display` + +**Purpose**: Verify progress ring updates during upload. + +**Steps**: +1. Navigate to page +2. Upload a file large enough to observe progress +3. Capture progress values during upload +4. Verify progress increases monotonically + +**Assertions**: +- Progress container contains canvas element +- Status text changes from "Encrypting" to "Uploading" +- Progress percentage visible in canvas + +```typescript +test('upload shows progress', async ({page}) => { + await page.goto(PAGE_URL) + + // Use larger file to observe progress updates + const buffer = Buffer.alloc(500 * 1024, 0x42) // 500KB + await page.locator('#file-input').setInputFiles({ + name: 'progress-test.bin', + mimeType: 'application/octet-stream', + buffer + }) + + // Verify progress elements + await expect(page.locator('#upload-progress')).toBeVisible() + await expect(page.locator('#progress-container canvas')).toBeVisible() + + // Status should show encrypting then uploading + await expect(page.locator('#upload-status')).toContainText('Encrypting') + await expect(page.locator('#upload-status')).toContainText('Uploading', {timeout: 10_000}) + + // Wait for completion + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) +}) +``` + +### 3.6 Cancel Button + +**Test ID**: `upload-cancel` + +**Purpose**: Verify cancel button aborts upload and returns to landing. + +**Steps**: +1. Navigate to page +2. Start uploading a larger file +3. Click cancel button while upload in progress +4. 
Verify return to drop zone state + +**Assertions**: +- Cancel button visible during upload +- Drop zone visible after cancel +- No share link appears + +```typescript +test('upload cancel returns to landing', async ({page}) => { + await page.goto(PAGE_URL) + + // Use larger file to have time to cancel + const buffer = Buffer.alloc(1024 * 1024, 0x43) // 1MB + await page.locator('#file-input').setInputFiles({ + name: 'cancel-test.bin', + mimeType: 'application/octet-stream', + buffer + }) + + await expect(page.locator('#cancel-btn')).toBeVisible() + await page.locator('#cancel-btn').click() + + await expect(page.locator('#drop-zone')).toBeVisible() + await expect(page.locator('#upload-progress')).toBeHidden() + await expect(page.locator('[data-testid="share-link"]')).toBeHidden() +}) +``` + +### 3.7 Share Link Display and Copy + +**Test ID**: `upload-share-link-copy` + +**Purpose**: Verify share link is displayed and copy button works. + +**Steps**: +1. Complete upload +2. Verify share link input contains valid URL +3. Click copy button +4. Verify button text changes to "Copied!" +5. Verify clipboard contains link (if clipboard API available) + +**Assertions**: +- Share link matches expected format +- Copy button text changes on click +- Link can be used to navigate to download page + +```typescript +test('upload share link and copy button', async ({page, context}) => { + // Grant clipboard permissions + await context.grantPermissions(['clipboard-read', 'clipboard-write']) + + await page.goto(PAGE_URL) + + const content = 'copy test ' + Date.now() + const buffer = Buffer.from(content, 'utf-8') + await page.locator('#file-input').setInputFiles({ + name: 'copy-test.txt', + mimeType: 'text/plain', + buffer + }) + + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + + const shareLink = page.locator('[data-testid="share-link"]') + const linkValue = await shareLink.inputValue() + expect(linkValue).toMatch(/^http:\/\/localhost:\d+\/#[A-Za-z0-9_-]+/) + + // Click copy + await page.locator('#copy-btn').click() + await expect(page.locator('#copy-btn')).toContainText('Copied!') + + // Verify clipboard (may fail in headless without permissions) + try { + const clipboardText = await page.evaluate(() => navigator.clipboard.readText()) + expect(clipboardText).toBe(linkValue) + } catch { + // Clipboard API may not be available in all test environments + } + + // Button reverts after timeout + await expect(page.locator('#copy-btn')).toContainText('Copy', {timeout: 3000}) +}) +``` + +### 3.8 Error Handling and Retry + +**Test ID**: `upload-error-retry` + +**Purpose**: Verify error state shows retry button that restarts upload. + +**Steps**: +1. Trigger upload error (e.g., by stopping server or using invalid server) +2. Verify error state shown +3. Click retry button +4. Verify upload restarts + +**Note**: Testing true network errors requires server manipulation. Alternative: test retry button functionality after validation error. 
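+
+Where a real transfer error is wanted without touching the server, route interception (the same mechanism as §5.4) can force the upload to fail and then exercise the actual retry path. A sketch, assuming XFTP commands travel as POST requests to a localhost origin other than the page's, and that `#retry-btn` re-submits the previously selected file (both to be confirmed against the implementation):
+
+```typescript
+test('upload retry succeeds after transient network failure', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  // Matcher for the XFTP server: any localhost origin except the page itself
+  const xftpOrigin = (url: URL) => url.hostname === 'localhost' && url.port !== '4173'
+
+  // Abort XFTP POSTs to force a transfer error (validation still passes)
+  await page.route(xftpOrigin, route =>
+    route.request().method() === 'POST' ? route.abort('failed') : route.continue())
+
+  await page.locator('#file-input').setInputFiles({
+    name: 'retry-network.txt',
+    mimeType: 'text/plain',
+    buffer: Buffer.from('retry test ' + Date.now(), 'utf-8')
+  })
+  await expect(page.locator('#upload-error')).toBeVisible({timeout: 30_000})
+
+  // Restore the network and retry
+  await page.unroute(xftpOrigin)
+  await page.locator('#retry-btn').click()
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+})
+```
+
+The simpler variant below only checks that the retry button is rendered after a validation error: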
+ +```typescript +test('upload error shows retry button', async ({page}) => { + await page.goto(PAGE_URL) + + // Trigger validation error first (empty file) + await page.locator('#file-input').setInputFiles({ + name: 'error-test.txt', + mimeType: 'text/plain', + buffer: Buffer.alloc(0) + }) + + await expect(page.locator('#upload-error')).toBeVisible() + await expect(page.locator('#retry-btn')).toBeVisible() + + // Note: clicking retry uses pendingFile which was empty + // To test actual retry flow, file must be re-selected first +}) +``` + +--- + +## 4. Download Flow Tests + +### 4.1 Invalid Link Handling - Malformed Hash + +**Test ID**: `download-invalid-hash-malformed` + +**Purpose**: Verify malformed hash fragment shows error. + +**Steps**: +1. Navigate to page with invalid hash (not valid base64url) +2. Verify error message displayed + +**Assertions**: +- Error message contains "Invalid" or "corrupted" +- No download button visible + +```typescript +test('download shows error for malformed hash', async ({page}) => { + await page.goto(PAGE_URL + '#not-valid-base64!!!') + + await expect(page.locator('.error')).toBeVisible() + await expect(page.locator('.error')).toContainText(/[Ii]nvalid|corrupted/) + await expect(page.locator('#dl-btn')).toBeHidden() +}) +``` + +### 4.2 Invalid Link Handling - Valid Base64 but Invalid Structure + +**Test ID**: `download-invalid-hash-structure` + +**Purpose**: Verify structurally invalid (but base64-decodable) hash shows error. + +**Steps**: +1. Navigate to page with valid base64url that decodes to invalid data +2. Verify error message displayed + +```typescript +test('download shows error for invalid structure', async ({page}) => { + // Valid base64url but not valid DEFLATE-compressed YAML + await page.goto(PAGE_URL + '#AAAA') + + await expect(page.locator('.error')).toBeVisible() + await expect(page.locator('.error')).toContainText(/[Ii]nvalid|corrupted/) +}) +``` + +### 4.3 Invalid Link Handling - Expired/Deleted File + +**Test ID**: `download-expired-file` + +**Purpose**: Verify expired or deleted file shows appropriate error. + +**Steps**: +1. Upload a file +2. Delete the file on server (or use stale link from previous test run) +3. Try to download +4. Verify error shown + +**Note**: Requires either server manipulation or using a pre-generated stale link. Marked as skipped. + +```typescript +test.skip('download shows error for expired file', async ({page}) => { + // This test requires server manipulation to delete the file + // or a mechanism to generate expired links +}) +``` + +### 4.4 Download Button Click + +**Test ID**: `download-button-click` + +**Purpose**: Verify download button initiates download. + +**Steps**: +1. Upload file to get valid link +2. Navigate to download page +3. Verify download button visible with file size +4. Click download button +5. 
Verify download starts + +```typescript +test('download button initiates download', async ({page}) => { + // First upload a file + await page.goto(PAGE_URL) + const content = 'download button test ' + Date.now() + await page.locator('#file-input').setInputFiles({ + name: 'dl-btn-test.txt', + mimeType: 'text/plain', + buffer: Buffer.from(content, 'utf-8') + }) + + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + const linkValue = await page.locator('[data-testid="share-link"]').inputValue() + const hash = new URL(linkValue).hash + + // Navigate to download page + await page.goto(PAGE_URL + hash) + + await expect(page.locator('#dl-btn')).toBeVisible() + await expect(page.locator('#dl-ready')).toContainText(/File available/) + + // Click and verify download + const downloadPromise = page.waitForEvent('download') + await page.locator('#dl-btn').click() + const download = await downloadPromise + + expect(download.suggestedFilename()).toBe('dl-btn-test.txt') +}) +``` + +### 4.5 Progress Display + +**Test ID**: `download-progress-display` + +**Purpose**: Verify progress is shown during download. + +**Steps**: +1. Upload larger file +2. Navigate to download page +3. Click download +4. Verify progress ring visible +5. Verify status text updates + +```typescript +test('download shows progress', async ({page}) => { + // Upload larger file + await page.goto(PAGE_URL) + const buffer = Buffer.alloc(500 * 1024, 0x44) // 500KB + await page.locator('#file-input').setInputFiles({ + name: 'dl-progress.bin', + mimeType: 'application/octet-stream', + buffer + }) + + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + const linkValue = await page.locator('[data-testid="share-link"]').inputValue() + const hash = new URL(linkValue).hash + + await page.goto(PAGE_URL + hash) + + const downloadPromise = page.waitForEvent('download') + await page.locator('#dl-btn').click() + + await expect(page.locator('#dl-progress')).toBeVisible() + await expect(page.locator('#dl-progress-container canvas')).toBeVisible() + await expect(page.locator('#dl-status')).toContainText('Downloading') + + await downloadPromise + await expect(page.locator('#dl-status')).toContainText(/complete|Decrypting/) +}) +``` + +### 4.6 File Save Verification + +**Test ID**: `download-file-save` + +**Purpose**: Verify downloaded file has correct content. + +**Steps**: +1. Upload file with known content +2. Download the file +3. 
Verify downloaded content matches original + +```typescript +test('download file content matches upload', async ({page}) => { + const content = 'verification content ' + Date.now() + ' special chars: @#$%' + const fileName = 'verify.txt' + + await page.goto(PAGE_URL) + await page.locator('#file-input').setInputFiles({ + name: fileName, + mimeType: 'text/plain', + buffer: Buffer.from(content, 'utf-8') + }) + + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + const linkValue = await page.locator('[data-testid="share-link"]').inputValue() + const hash = new URL(linkValue).hash + + await page.goto(PAGE_URL + hash) + + const downloadPromise = page.waitForEvent('download') + await page.locator('#dl-btn').click() + const download = await downloadPromise + + expect(download.suggestedFilename()).toBe(fileName) + + const path = await download.path() + if (path) { + const downloadedContent = (await import('fs')).readFileSync(path, 'utf-8') + expect(downloadedContent).toBe(content) + } +}) +``` + +### 4.7 Error States + +**Test ID**: `download-error-states` + +**Purpose**: Verify error state UI elements. + +**Steps**: +1. Trigger download error +2. Verify error message shown +3. Verify retry button present + +```typescript +test('download error shows retry button', async ({page}) => { + // Navigate to invalid hash to trigger error + await page.goto(PAGE_URL + '#invalid') + await expect(page.locator('.error')).toBeVisible() + + // Note: Retry button only appears for download errors during transfer, + // not for initial parse errors which show immediately without retry option +}) +``` + +--- + +## 5. Edge Cases + +### 5.1 Very Small Files + +**Test ID**: `edge-small-file` + +**Purpose**: Verify 1-byte file uploads and downloads correctly. + +```typescript +test('upload and download 1-byte file', async ({page}) => { + await page.goto(PAGE_URL) + + await page.locator('#file-input').setInputFiles({ + name: 'tiny.bin', + mimeType: 'application/octet-stream', + buffer: Buffer.from([0x42]) + }) + + await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000}) + const linkValue = await page.locator('[data-testid="share-link"]').inputValue() + const hash = new URL(linkValue).hash + + await page.goto(PAGE_URL + hash) + + const downloadPromise = page.waitForEvent('download') + await page.locator('#dl-btn').click() + const download = await downloadPromise + + expect(download.suggestedFilename()).toBe('tiny.bin') + const path = await download.path() + if (path) { + const content = (await import('fs')).readFileSync(path) + expect(content.length).toBe(1) + expect(content[0]).toBe(0x42) + } +}) +``` + +### 5.2 Files Near 100MB Limit + +**Test ID**: `edge-near-limit` + +**Purpose**: Verify file at exactly 100MB uploads successfully. + +**Note**: This test is slow due to large file size. Mark as `test.slow()`. 
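+
+Both this test and the redirect test in §5.7 build the `File` inside the browser, so no 100MB buffer travels over the Playwright protocol. The pattern can be factored into a helper; a sketch (the helper name is ours, and it assumes the `#file-input` id used throughout this plan). The tests below keep the inline version to stay self-contained:
+
+```typescript
+// Create a `size`-byte File in the browser and feed it to #file-input,
+// dispatching `change` exactly as a user selection would
+async function setLargeInputFile(page: Page, name: string, size: number, fill = 0): Promise<void> {
+  await page.evaluate(({name, size, fill}) => {
+    const input = document.getElementById('file-input') as HTMLInputElement
+    const buffer = new ArrayBuffer(size)
+    new Uint8Array(buffer).fill(fill)
+    const file = new File([buffer], name, {type: 'application/octet-stream'})
+    const dt = new DataTransfer()
+    dt.items.add(file)
+    input.files = dt.files
+    input.dispatchEvent(new Event('change', {bubbles: true}))
+  }, {name, size, fill})
+}
+```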
+
+```typescript
+test('upload file at exactly 100MB', async ({page}) => {
+  test.slow() // keep the slow marker inside the test body so only this test gets the tripled timeout
+  await page.goto(PAGE_URL)
+
+  // 100MB exactly - use browser-side file creation
+  const size = 100 * 1024 * 1024
+  await page.evaluate((size) => {
+    const input = document.getElementById('file-input') as HTMLInputElement
+    const buffer = new ArrayBuffer(size)
+    const file = new File([buffer], 'exactly-100mb.bin', {type: 'application/octet-stream'})
+    const dt = new DataTransfer()
+    dt.items.add(file)
+    input.files = dt.files
+    input.dispatchEvent(new Event('change', {bubbles: true}))
+  }, size)
+
+  // Should succeed (not show error)
+  await expect(page.locator('#upload-error')).toBeHidden({timeout: 5000})
+  await expect(page.locator('#upload-progress')).toBeVisible()
+
+  // Wait for completion (may take a while for 100MB)
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 300_000})
+})
+```
+
+### 5.3 Special Characters in Filename
+
+**Test ID**: `edge-special-chars-filename`
+
+**Purpose**: Verify filenames with special characters are handled correctly.
+
+**Test cases**:
+- Unicode characters
+- Spaces
+- Dots (multiple extensions)
+- Path separators (should be stripped)
+- Control characters (should be replaced)
+
+```typescript
+test('upload and download file with unicode filename', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  const fileName = 'test-file-\u4e2d\u6587-\u0420\u0443\u0441\u0441\u043a\u0438\u0439.txt'
+  await page.locator('#file-input').setInputFiles({
+    name: fileName,
+    mimeType: 'text/plain',
+    buffer: Buffer.from('unicode filename test', 'utf-8')
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  const download = await downloadPromise
+
+  expect(download.suggestedFilename()).toBe(fileName)
+})
+
+test('upload and download file with spaces in name', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  const fileName = 'my document (final) v2.txt'
+  await page.locator('#file-input').setInputFiles({
+    name: fileName,
+    mimeType: 'text/plain',
+    buffer: Buffer.from('spaces test', 'utf-8')
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  const download = await downloadPromise
+
+  expect(download.suggestedFilename()).toBe(fileName)
+})
+
+test('filename with path separators is sanitized', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  // Filename with path separators (should be stripped by sanitizeFileName)
+  const fileName = '../../../etc/passwd'
+  await page.locator('#file-input').setInputFiles({
+    name: fileName,
+    mimeType: 'text/plain',
+    buffer: Buffer.from('path traversal test', 'utf-8')
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  
const download = await downloadPromise
+
+  // Path separators should be stripped
+  expect(download.suggestedFilename()).not.toContain('/')
+  expect(download.suggestedFilename()).not.toContain('\\')
+  expect(download.suggestedFilename()).toBe('......etcpasswd')
+})
+```
+
+### 5.4 Network Errors (Mocked)
+
+**Test ID**: `edge-network-error`
+
+**Purpose**: Verify network error handling.
+
+**Approach**: Use Playwright's route interception to simulate network failures.
+
+```typescript
+test('upload handles network error gracefully', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  // Intercept requests to the XFTP server: any localhost origin except the page itself.
+  // A URL predicate avoids glob subtleties ('*' does not match '/' in request paths).
+  await page.route(url => url.hostname === 'localhost' && url.port !== '4173', route => {
+    // Only abort POST requests (the XFTP protocol uses POST)
+    if (route.request().method() === 'POST') {
+      route.abort('failed')
+    } else {
+      route.continue()
+    }
+  })
+
+  await page.locator('#file-input').setInputFiles({
+    name: 'network-error.txt',
+    mimeType: 'text/plain',
+    buffer: Buffer.from('network error test', 'utf-8')
+  })
+
+  // Should eventually show error
+  await expect(page.locator('#upload-error')).toBeVisible({timeout: 30_000})
+  await expect(page.locator('#error-msg')).toBeVisible()
+})
+
+test('download handles network error gracefully', async ({page}) => {
+  // First upload without interception
+  await page.goto(PAGE_URL)
+  await page.locator('#file-input').setInputFiles({
+    name: 'network-dl-test.txt',
+    mimeType: 'text/plain',
+    buffer: Buffer.from('will fail download', 'utf-8')
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  // Navigate and set up interception before clicking download
+  await page.goto(PAGE_URL + hash)
+
+  await page.route(url => url.hostname === 'localhost' && url.port !== '4173', route => {
+    if (route.request().method() === 'POST') {
+      route.abort('failed')
+    } else {
+      route.continue()
+    }
+  })
+
+  await page.locator('#dl-btn').click()
+
+  await expect(page.locator('#dl-error')).toBeVisible({timeout: 30_000})
+  await expect(page.locator('#dl-error-msg')).toBeVisible()
+})
+```
+
+### 5.5 Binary File Content Integrity
+
+**Test ID**: `edge-binary-content`
+
+**Purpose**: Verify binary files with all byte values are handled correctly.
+
+```typescript
+test('binary file with all byte values', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  // Create buffer with all 256 byte values
+  const buffer = Buffer.alloc(256)
+  for (let i = 0; i < 256; i++) buffer[i] = i
+
+  await page.locator('#file-input').setInputFiles({
+    name: 'all-bytes.bin',
+    mimeType: 'application/octet-stream',
+    buffer
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  const download = await downloadPromise
+
+  const path = await download.path()
+  if (path) {
+    const content = (await import('fs')).readFileSync(path)
+    expect(content.length).toBe(256)
+    for (let i = 0; i < 256; i++) {
+      expect(content[i]).toBe(i)
+    }
+  }
+})
+```
+
+### 5.6 Multiple Concurrent Downloads
+
+**Test ID**: `edge-concurrent-downloads`
+
+**Purpose**: Verify multiple browser tabs can download the same file.
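+
+The download half of these tests always repeats the same navigate, `waitForEvent('download')`, click sequence; a shared helper keeps multi-page tests like the one below compact. A sketch (`downloadFrom` is not an existing utility; it reuses `PAGE_URL` from §7.1 and the `#dl-btn` id):
+
+```typescript
+import type {Download, Page} from '@playwright/test'
+
+// Open a share link and trigger the download, returning Playwright's Download handle
+async function downloadFrom(page: Page, hash: string): Promise<Download> {
+  await page.goto(PAGE_URL + hash)
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  return downloadPromise
+}
+```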
+
+```typescript
+test('concurrent downloads from same link', async ({browser}) => {
+  const context = await browser.newContext({ignoreHTTPSErrors: true})
+  const page1 = await context.newPage()
+
+  // Upload
+  await page1.goto(PAGE_URL)
+  await page1.locator('#file-input').setInputFiles({
+    name: 'concurrent.txt',
+    mimeType: 'text/plain',
+    buffer: Buffer.from('concurrent download test', 'utf-8')
+  })
+
+  await expect(page1.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page1.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  // Open two tabs and download concurrently
+  const page2 = await context.newPage()
+  const page3 = await context.newPage()
+
+  await page2.goto(PAGE_URL + hash)
+  await page3.goto(PAGE_URL + hash)
+
+  const [download2, download3] = await Promise.all([
+    (async () => {
+      const p = page2.waitForEvent('download')
+      await page2.locator('#dl-btn').click()
+      return p
+    })(),
+    (async () => {
+      const p = page3.waitForEvent('download')
+      await page3.locator('#dl-btn').click()
+      return p
+    })()
+  ])
+
+  expect(download2.suggestedFilename()).toBe('concurrent.txt')
+  expect(download3.suggestedFilename()).toBe('concurrent.txt')
+
+  await context.close()
+})
+```
+
+### 5.7 Redirect File Handling (Multi-chunk)
+
+**Test ID**: `edge-redirect-file`
+
+**Purpose**: Verify files large enough to trigger redirect description are handled correctly.
+
+**Note**: Redirect triggers when the description URI exceeds the ~400-char threshold, which happens for multi-chunk files. With a ~4MB chunk size, the ~5MB file below already spans multiple chunks.
+
+```typescript
+test('upload and download multi-chunk file with redirect', async ({page}) => {
+  test.slow() // keep the slow marker inside the test body so only this test gets the tripled timeout
+  await page.goto(PAGE_URL)
+
+  // Use ~5MB file to get multiple chunks (chunk size is ~4MB)
+  const size = 5 * 1024 * 1024
+  await page.evaluate((size) => {
+    const input = document.getElementById('file-input') as HTMLInputElement
+    const buffer = new ArrayBuffer(size)
+    new Uint8Array(buffer).fill(0x55)
+    const file = new File([buffer], 'multi-chunk.bin', {type: 'application/octet-stream'})
+    const dt = new DataTransfer()
+    dt.items.add(file)
+    input.files = dt.files
+    input.dispatchEvent(new Event('change', {bubbles: true}))
+  }, size)
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 120_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  const downloadPromise = page.waitForEvent('download')
+  await page.locator('#dl-btn').click()
+  const download = await downloadPromise
+
+  expect(download.suggestedFilename()).toBe('multi-chunk.bin')
+
+  // Verify size
+  const path = await download.path()
+  if (path) {
+    const stat = (await import('fs')).statSync(path)
+    expect(stat.size).toBe(size)
+  }
+})
+```
+
+### 5.8 UI Information Display
+
+**Test ID**: `edge-ui-info`
+
+**Purpose**: Verify informational UI elements are displayed correctly.
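+
+A caveat for the size assertion below: the download page shows the *encrypted* size, which is slightly larger than the original file, so the expectation should match the rendered digits-plus-unit loosely rather than compute an exact value. For example, assuming `#dl-ready` renders something like `File available (1.2 KB)`:
+
+```typescript
+// Loose match: digits (optionally fractional) followed by a B/KB/MB unit
+await expect(page.locator('#dl-ready')).toContainText(/\d+(\.\d+)?\s*(B|KB|MB)/)
+```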
+
+```typescript
+test('upload complete shows expiry message and security note', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  await page.locator('#file-input').setInputFiles({
+    name: 'ui-test.txt',
+    mimeType: 'text/plain',
+    buffer: Buffer.from('ui test', 'utf-8')
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+
+  // Verify expiry message
+  await expect(page.locator('.expiry')).toContainText('48 hours')
+
+  // Verify security note
+  await expect(page.locator('.security-note')).toBeVisible()
+  await expect(page.locator('.security-note')).toContainText('encrypted')
+  await expect(page.locator('.security-note')).toContainText('hash fragment')
+})
+
+test('download page shows file size and security note', async ({page}) => {
+  await page.goto(PAGE_URL)
+
+  const buffer = Buffer.alloc(1024, 0x46) // 1KB
+  await page.locator('#file-input').setInputFiles({
+    name: 'size-test.bin',
+    mimeType: 'application/octet-stream',
+    buffer
+  })
+
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  const linkValue = await page.locator('[data-testid="share-link"]').inputValue()
+  const hash = new URL(linkValue).hash
+
+  await page.goto(PAGE_URL + hash)
+
+  // Verify file size displayed (encrypted size is slightly larger)
+  await expect(page.locator('#dl-ready')).toContainText(/\d+(\.\d+)?\s*(B|KB|MB)/)
+
+  // Verify security note
+  await expect(page.locator('.security-note')).toBeVisible()
+  await expect(page.locator('.security-note')).toContainText('encrypted')
+})
+```
+
+### 5.9 Drag-Drop Visual Feedback
+
+**Test ID**: `edge-drag-drop-visual`
+
+**Purpose**: Verify drag-over visual state is applied correctly.
+
+```typescript
+test('drop zone shows visual feedback on drag over', async ({page}) => {
+  await page.goto(PAGE_URL)
+  const dropZone = page.locator('#drop-zone')
+
+  // Verify initial state
+  await expect(dropZone).not.toHaveClass(/drag-over/)
+
+  // Build a real DataTransfer in the page: Playwright serializes plain objects,
+  // so a literal {types: ['Files']} would not arrive as a usable DataTransfer
+  const dataTransfer = await page.evaluateHandle(() => {
+    const dt = new DataTransfer()
+    dt.items.add(new File(['x'], 'drag.txt', {type: 'text/plain'}))
+    return dt
+  })
+
+  // Simulate dragover and verify the visual state
+  await dropZone.dispatchEvent('dragover', {bubbles: true, cancelable: true, dataTransfer})
+  await expect(dropZone).toHaveClass(/drag-over/)
+
+  // Simulate dragleave and verify the state is cleared
+  await dropZone.dispatchEvent('dragleave', {bubbles: true})
+  await expect(dropZone).not.toHaveClass(/drag-over/)
+})
+```
+
+---
+
+## 6. Implementation Order
+
+### Phase 1: Core Happy Path (Priority: High)
+1. `upload-file-picker` - Basic upload via file picker
+2. `download-button-click` - Basic download
+3. `download-file-save` - Content verification
+
+### Phase 2: Validation (Priority: High)
+4. `upload-file-too-large` - Size validation
+5. `upload-file-empty` - Empty file validation
+6. `download-invalid-hash-malformed` - Invalid link handling
+7. `download-invalid-hash-structure` - Invalid structure handling
+
+### Phase 3: Progress and Cancel (Priority: Medium)
+8. `upload-progress-display` - Progress visibility
+9. `upload-cancel` - Cancel functionality
+10. `download-progress-display` - Download progress
+
+### Phase 4: Link Sharing (Priority: Medium)
+11. `upload-share-link-copy` - Copy button functionality
+12. `upload-drag-drop` - Drag-drop upload
+
+### Phase 5: Edge Cases (Priority: Low)
+13. `edge-small-file` - 1-byte file
+14. `edge-special-chars-filename` - Unicode/special characters
+15. `edge-binary-content` - Binary content integrity
+16. `edge-near-limit` - 100MB file (slow test)
+17. `edge-network-error` - Network error handling
+
+### Phase 6: Error Recovery (Priority: Low)
+18. 
`upload-error-retry` - Retry after error
+19. `download-error-states` - Error UI
+20. `edge-concurrent-downloads` - Concurrent access
+
+### Phase 7: Advanced Edge Cases (Priority: Low)
+21. `edge-redirect-file` - Multi-chunk file with redirect (slow)
+22. `edge-ui-info` - Expiry message, security notes, file size display
+23. `edge-drag-drop-visual` - Drag-over visual feedback
+
+---
+
+## 7. Test Utilities
+
+### 7.1 Shared Test Setup
+
+```typescript
+// test/page.spec.ts - imports and constants
+import {test, expect, Page} from '@playwright/test'
+import {readFileSync} from 'fs'
+import {join} from 'path'
+import {tmpdir} from 'os'
+
+const PORT_FILE = join(tmpdir(), 'xftp-test-server.port')
+const PAGE_URL = 'http://localhost:4173'
+
+// Read server port (not currently used but available for future tests)
+function getServerPort(): number {
+  try {
+    return parseInt(readFileSync(PORT_FILE, 'utf-8').trim(), 10)
+  } catch {
+    return 7000 // fallback
+  }
+}
+```
+
+### 7.2 Test Fixtures
+
+```typescript
+// Reusable file creation helper; resolves to the share link value
+async function uploadTestFile(page: Page, name: string, content: string | Buffer): Promise<string> {
+  const buffer = typeof content === 'string' ? Buffer.from(content, 'utf-8') : content
+  await page.locator('#file-input').setInputFiles({
+    name,
+    mimeType: buffer.length > 0 && typeof content === 'string' ? 'text/plain' : 'application/octet-stream',
+    buffer
+  })
+  await expect(page.locator('[data-testid="share-link"]')).toBeVisible({timeout: 30_000})
+  return await page.locator('[data-testid="share-link"]').inputValue()
+}
+
+// Extract hash from share link
+function getHash(url: string): string {
+  return new URL(url).hash
+}
+```
+
+### 7.3 Test Organization
+
+```typescript
+test.describe('Upload Flow', () => {
+  test.beforeEach(async ({page}) => {
+    await page.goto(PAGE_URL)
+  })
+
+  // Upload tests here
+})
+
+test.describe('Download Flow', () => {
+  // Download tests here
+})
+
+test.describe('Edge Cases', () => {
+  // Edge case tests here
+})
+```
+
+---
+
+## Appendix: Test Matrix
+
+| Test ID | Category | Priority | Estimated Time | Dependencies |
+|---------|----------|----------|----------------|--------------|
+| upload-file-picker | Upload | High | 30s | - |
+| upload-drag-drop | Upload | Medium | 30s | - |
+| upload-file-too-large | Upload | High | 5s | - |
+| upload-file-empty | Upload | High | 5s | - |
+| upload-progress-display | Upload | Medium | 45s | - |
+| upload-cancel | Upload | Medium | 30s | - |
+| upload-share-link-copy | Upload | Medium | 30s | - |
+| upload-error-retry | Upload | Low | 30s | - |
+| download-invalid-hash-malformed | Download | High | 5s | - |
+| download-invalid-hash-structure | Download | High | 5s | - |
+| download-button-click | Download | High | 45s | upload |
+| download-progress-display | Download | Medium | 60s | upload |
+| download-file-save | Download | High | 45s | upload |
+| download-error-states | Download | Low | 10s | - |
+| edge-small-file | Edge | Low | 30s | - |
+| edge-near-limit | Edge | Low | 300s | - |
+| edge-special-chars-filename | Edge | Low | 30s | - |
+| edge-network-error | Edge | Low | 45s | - |
+| edge-binary-content | Edge | Low | 30s | - |
+| edge-concurrent-downloads | Edge | Low | 60s | upload |
+| edge-redirect-file | Edge | Low | 120s | - |
+| edge-ui-info | Edge | Low | 60s | upload |
+| edge-drag-drop-visual | Edge | Low | 10s | - |
+
+**Total estimated time**: ~18 minutes in total; ~11 minutes excluding the 100MB and 5MB tests
diff --git a/simplexmq.cabal b/simplexmq.cabal
index 
2ff5de8002..8a79a1716f 100644 --- a/simplexmq.cabal +++ b/simplexmq.cabal @@ -497,6 +497,7 @@ test-suite simplexmq-test XFTPCLI XFTPClient XFTPServerTests + XFTPWebTests Static Static.Embedded Paths_simplexmq @@ -526,6 +527,7 @@ test-suite simplexmq-test , async , base64-bytestring , bytestring + , case-insensitive ==1.2.* , containers , crypton , crypton-x509 diff --git a/src/Simplex/FileTransfer/Client.hs b/src/Simplex/FileTransfer/Client.hs index 62f06b7d31..4c35780d39 100644 --- a/src/Simplex/FileTransfer/Client.hs +++ b/src/Simplex/FileTransfer/Client.hs @@ -40,11 +40,11 @@ import Simplex.Messaging.Client NetworkRequestMode (..), ProtocolClientError (..), TransportSession, - netTimeoutInt, chooseTransportHost, + clientSocksCredentials, defaultNetworkConfig, + netTimeoutInt, transportClientConfig, - clientSocksCredentials, unexpectedResponse, useWebPort, ) @@ -54,13 +54,13 @@ import Simplex.Messaging.Encoding (smpDecode, smpEncode) import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol ( BasicAuth, + NetworkError (..), Protocol (..), ProtocolServer (..), RecipientId, SenderId, - pattern NoEntity, - NetworkError (..), toNetworkError, + pattern NoEntity, ) import Simplex.Messaging.Transport (ALPN, CertChainPubKey (..), HandshakeError (..), THandleAuth (..), THandleParams (..), TransportError (..), TransportPeer (..), defaultSupportedParams) import Simplex.Messaging.Transport.Client (TransportClientConfig (..), TransportHost) @@ -126,8 +126,9 @@ getXFTPClient transportSession@(_, srv, _) config@XFTPClientConfig {clientALPN, thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} logDebug $ "Client negotiated handshake protocol: " <> tshow sessionALPN thParams@THandleParams {thVersion} <- case sessionALPN of - Just alpn | alpn == xftpALPNv1 || alpn == httpALPN11 -> - xftpClientHandshakeV1 serverVRange keyHash http2Client thParams0 + Just alpn + | alpn == xftpALPNv1 || alpn == httpALPN11 -> + xftpClientHandshakeV1 serverVRange keyHash http2Client thParams0 _ -> pure thParams0 logDebug $ "Client negotiated protocol: " <> tshow thVersion let c = XFTPClient {http2Client, thParams, transportSession, config} @@ -212,7 +213,7 @@ sendXFTPTransmission XFTPClient {config, thParams, http2Client} t chunkSpec_ = d HTTP2Response {respBody = body@HTTP2Body {bodyHead}} <- withExceptT xftpClientError . ExceptT $ sendRequest http2Client req (Just reqTimeout) when (B.length bodyHead /= xftpBlockSize) $ throwE $ PCEResponseError BLOCK -- TODO validate that the file ID is the same as in the request? 
- (_, _fId, respOrErr) <-liftEither $ first PCEResponseError $ xftpDecodeTClient thParams bodyHead + (_, _fId, respOrErr) <- liftEither $ first PCEResponseError $ xftpDecodeTClient thParams bodyHead case respOrErr of Right r -> case protocolError r of Just e -> throwE $ PCEProtocolError e diff --git a/src/Simplex/FileTransfer/Server.hs b/src/Simplex/FileTransfer/Server.hs index 25de49afc8..44f5211e43 100644 --- a/src/Simplex/FileTransfer/Server.hs +++ b/src/Simplex/FileTransfer/Server.hs @@ -63,12 +63,12 @@ import Simplex.Messaging.Server.Stats import Simplex.Messaging.SystemTime import Simplex.Messaging.TMap (TMap) import qualified Simplex.Messaging.TMap as TM -import Simplex.Messaging.Transport (CertChainPubKey (..), SessionId, THandleAuth (..), THandleParams (..), TransportPeer (..), defaultSupportedParams) +import Simplex.Messaging.Transport (CertChainPubKey (..), SessionId, THandleAuth (..), THandleParams (..), TransportPeer (..), defaultSupportedParams, defaultSupportedParamsHTTPS) import Simplex.Messaging.Transport.Buffer (trimCR) import Simplex.Messaging.Transport.HTTP2 import Simplex.Messaging.Transport.HTTP2.File (fileBlockSize) -import Simplex.Messaging.Transport.HTTP2.Server -import Simplex.Messaging.Transport.Server (runLocalTCPServer) +import Simplex.Messaging.Transport.HTTP2.Server (runHTTP2Server) +import Simplex.Messaging.Transport.Server (SNICredentialUsed, TransportServerConfig (..), runLocalTCPServer) import Simplex.Messaging.Util import Simplex.Messaging.Version import System.Environment (lookupEnv) @@ -89,9 +89,24 @@ data XFTPTransportRequest = XFTPTransportRequest { thParams :: THandleParamsXFTP 'TServer, reqBody :: HTTP2Body, request :: H.Request, - sendResponse :: H.Response -> IO () + sendResponse :: H.Response -> IO (), + sniUsed :: SNICredentialUsed, + addCORS :: Bool } +corsHeaders :: Bool -> [N.Header] +corsHeaders addCORS + | addCORS = [("Access-Control-Allow-Origin", "*"), ("Access-Control-Expose-Headers", "*")] + | otherwise = [] + +corsPreflightHeaders :: [N.Header] +corsPreflightHeaders = + [ ("Access-Control-Allow-Origin", "*"), + ("Access-Control-Allow-Methods", "POST, OPTIONS"), + ("Access-Control-Allow-Headers", "*"), + ("Access-Control-Max-Age", "86400") + ] + runXFTPServer :: XFTPServerConfig -> IO () runXFTPServer cfg = do started <- newEmptyTMVarIO @@ -120,27 +135,34 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira runServer :: M () runServer = do srvCreds@(chain, pk) <- asks tlsServerCreds + httpCreds_ <- asks httpServerCreds signKey <- liftIO $ case C.x509ToPrivate' pk of Right pk' -> pure pk' Left e -> putStrLn ("Server has no valid key: " <> show e) >> exitFailure env <- ask sessions <- liftIO TM.emptyIO let cleanup sessionId = atomically $ TM.delete sessionId sessions - liftIO . 
runHTTP2Server started xftpPort defaultHTTP2BufferSize defaultSupportedParams srvCreds transportConfig inactiveClientExpiration cleanup $ \sessionId sessionALPN r sendResponse -> do - reqBody <- getHTTP2Body r xftpBlockSize - let v = VersionXFTP 1 - thServerVRange = versionToRange v - thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} - req0 = XFTPTransportRequest {thParams = thParams0, request = r, reqBody, sendResponse} - flip runReaderT env $ case sessionALPN of - Nothing -> processRequest req0 - Just alpn | alpn == xftpALPNv1 || alpn == httpALPN11 -> - xftpServerHandshakeV1 chain signKey sessions req0 >>= \case - Nothing -> pure () -- handshake response sent - Just thParams -> processRequest req0 {thParams} -- proceed with new version (XXX: may as well switch the request handler here) - _ -> liftIO . sendResponse $ H.responseNoBody N.ok200 [] -- shouldn't happen: means server picked handshake protocol it doesn't know about + srvParams = if isJust httpCreds_ then defaultSupportedParamsHTTPS else defaultSupportedParams + liftIO . runHTTP2Server started xftpPort defaultHTTP2BufferSize srvParams srvCreds httpCreds_ transportConfig inactiveClientExpiration cleanup $ \sniUsed sessionId sessionALPN r sendResponse -> do + let addCORS' = sniUsed && addCORSHeaders transportConfig + if addCORS' && H.requestMethod r == Just "OPTIONS" + then sendResponse $ H.responseNoBody N.ok200 corsPreflightHeaders + else do + reqBody <- getHTTP2Body r xftpBlockSize + let v = VersionXFTP 1 + thServerVRange = versionToRange v + thParams0 = THandleParams {sessionId, blockSize = xftpBlockSize, thVersion = v, thServerVRange, thAuth = Nothing, implySessId = False, encryptBlock = Nothing, batch = True, serviceAuth = False} + req0 = XFTPTransportRequest {thParams = thParams0, request = r, reqBody, sendResponse, sniUsed, addCORS = addCORS'} + flip runReaderT env $ case sessionALPN of + Nothing -> processRequest req0 + Just alpn + | alpn == xftpALPNv1 || alpn == httpALPN11 || (sniUsed && alpn == "h2") -> + xftpServerHandshakeV1 chain signKey sessions req0 >>= \case + Nothing -> pure () + Just thParams -> processRequest req0 {thParams} + | otherwise -> liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS') xftpServerHandshakeV1 :: X.CertificateChain -> C.APrivateSignKey -> TMap SessionId Handshake -> XFTPTransportRequest -> M (Maybe (THandleParams XFTPVersion 'TServer)) - xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, reqBody = HTTP2Body {bodyHead}, sendResponse} = do + xftpServerHandshakeV1 chain serverSignKey sessions XFTPTransportRequest {thParams = thParams0@THandleParams {sessionId}, reqBody = HTTP2Body {bodyHead}, sendResponse, sniUsed, addCORS} = do s <- atomically $ TM.lookup sessionId sessions r <- runExceptT $ case s of Nothing -> processHello @@ -149,16 +171,23 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira either sendError pure r where processHello = do - unless (B.null bodyHead) $ throwE HANDSHAKE + challenge_ <- + if + | B.null bodyHead -> pure Nothing + | sniUsed -> do + XFTPClientHello {webChallenge} <- liftHS $ smpDecode bodyHead + pure webChallenge + | otherwise -> throwE HANDSHAKE (k, pk) <- atomically . 
C.generateKeyPair =<< asks random atomically $ TM.insert sessionId (HandshakeSent pk) sessions let authPubKey = CertChainPubKey chain (C.signX509 serverSignKey $ C.publicToX509 k) - let hs = XFTPServerHandshake {xftpVersionRange = xftpServerVRange, sessionId, authPubKey} + webIdentityProof = C.sign serverSignKey . (<> sessionId) <$> challenge_ + let hs = XFTPServerHandshake {xftpVersionRange = xftpServerVRange, sessionId, authPubKey, webIdentityProof} shs <- encodeXftp hs #ifdef slow_servers lift randomDelay #endif - liftIO . sendResponse $ H.responseBuilder N.ok200 [] shs + liftIO . sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) shs pure Nothing processClientHandshake pk = do unless (B.length bodyHead == xftpBlockSize) $ throwE HANDSHAKE @@ -174,13 +203,13 @@ xftpServer cfg@XFTPServerConfig {xftpPort, transportConfig, inactiveClientExpira #ifdef slow_servers lift randomDelay #endif - liftIO . sendResponse $ H.responseNoBody N.ok200 [] + liftIO . sendResponse $ H.responseNoBody N.ok200 (corsHeaders addCORS) pure Nothing Nothing -> throwE HANDSHAKE sendError :: XFTPErrorType -> M (Maybe (THandleParams XFTPVersion 'TServer)) sendError err = do runExceptT (encodeXftp err) >>= \case - Right bs -> liftIO . sendResponse $ H.responseBuilder N.ok200 [] bs + Right bs -> liftIO . sendResponse $ H.responseBuilder N.ok200 (corsHeaders addCORS) bs Left _ -> logError $ "Error encoding handshake error: " <> tshow err pure Nothing encodeXftp :: Encoding a => a -> ExceptT XFTPErrorType (ReaderT XFTPEnv IO) Builder @@ -346,7 +375,7 @@ data ServerFile = ServerFile } processRequest :: XFTPTransportRequest -> M () -processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse} +processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHead}, sendResponse, addCORS} | B.length bodyHead /= xftpBlockSize = sendXFTPResponse ("", NoEntity, FRErr BLOCK) Nothing | otherwise = case xftpDecodeTServer thParams bodyHead of @@ -365,7 +394,7 @@ processRequest XFTPTransportRequest {thParams, reqBody = body@HTTP2Body {bodyHea #ifdef slow_servers randomDelay #endif - liftIO $ sendResponse $ H.responseStreaming N.ok200 [] $ streamBody t_ + liftIO $ sendResponse $ H.responseStreaming N.ok200 (corsHeaders addCORS) $ streamBody t_ where streamBody t_ send done = do case t_ of diff --git a/src/Simplex/FileTransfer/Server/Env.hs b/src/Simplex/FileTransfer/Server/Env.hs index 206a5b3753..389296a8f5 100644 --- a/src/Simplex/FileTransfer/Server/Env.hs +++ b/src/Simplex/FileTransfer/Server/Env.hs @@ -57,6 +57,7 @@ data XFTPServerConfig = XFTPServerConfig -- | time after which inactive clients can be disconnected and check interval, seconds inactiveClientExpiration :: Maybe ExpirationConfig, xftpCredentials :: ServerCredentials, + httpCredentials :: Maybe ServerCredentials, -- | XFTP client-server protocol version range xftpServerVRange :: VersionRangeXFTP, -- stats config - see SMP server config @@ -84,6 +85,7 @@ data XFTPEnv = XFTPEnv random :: TVar ChaChaDRG, serverIdentity :: C.KeyHash, tlsServerCreds :: T.Credential, + httpServerCreds :: Maybe T.Credential, serverStats :: FileServerStats } @@ -98,7 +100,7 @@ defaultFileExpiration = } newXFTPServerEnv :: XFTPServerConfig -> IO XFTPEnv -newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials} = do +newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCredentials, httpCredentials} = do random <- C.newRandom store <- newFileStore storeLog <- mapM 
(`readWriteFileStore` store) storeLogFile @@ -108,9 +110,10 @@ newXFTPServerEnv config@XFTPServerConfig {storeLogFile, fileSizeQuota, xftpCrede logNote $ "Total / available storage: " <> tshow quota <> " / " <> tshow (quota - used) when (quota < used) $ logWarn "WARNING: storage quota is less than used storage, no files can be uploaded!" tlsServerCreds <- loadServerCredential xftpCredentials + httpServerCreds <- mapM loadServerCredential httpCredentials Fingerprint fp <- loadFingerprint xftpCredentials serverStats <- newFileServerStats =<< getCurrentTime - pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, serverIdentity = C.KeyHash fp, serverStats} + pure XFTPEnv {config, store, storeLog, random, tlsServerCreds, httpServerCreds, serverIdentity = C.KeyHash fp, serverStats} countUsedStorage :: M.Map k FileRec -> Int64 countUsedStorage = M.foldl' (\acc FileRec {fileInfo = FileInfo {size}} -> acc + fromIntegral size) 0 diff --git a/src/Simplex/FileTransfer/Server/Main.hs b/src/Simplex/FileTransfer/Server/Main.hs index 944df1ca0e..1c09762f8f 100644 --- a/src/Simplex/FileTransfer/Server/Main.hs +++ b/src/Simplex/FileTransfer/Server/Main.hs @@ -12,7 +12,7 @@ import Data.Either (fromRight) import Data.Functor (($>)) import Data.Ini (lookupValue, readIniFile) import Data.Int (Int64) -import Data.Maybe (fromMaybe) +import Data.Maybe (fromMaybe, isJust) import qualified Data.Text as T import qualified Data.Text.IO as T import Network.Socket (HostName) @@ -21,7 +21,7 @@ import Simplex.FileTransfer.Chunks import Simplex.FileTransfer.Description (FileSize (..)) import Simplex.FileTransfer.Server (runXFTPServer) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defFileExpirationHours, defaultFileExpiration, defaultInactiveClientExpiration) -import Simplex.FileTransfer.Transport (supportedFileServerVRange, alpnSupportedXFTPhandshakes) +import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import qualified Simplex.Messaging.Crypto as C import Simplex.Messaging.Encoding.String import Simplex.Messaging.Protocol (ProtoServerWithAuth (..), pattern XFTPServer) @@ -29,7 +29,7 @@ import Simplex.Messaging.Server.CLI import Simplex.Messaging.Server.Expiration import Simplex.Messaging.Transport.Client (TransportHost (..)) import Simplex.Messaging.Transport.HTTP2 (httpALPN) -import Simplex.Messaging.Transport.Server (ServerCredentials (..), mkTransportServerConfig) +import Simplex.Messaging.Transport.Server (ServerCredentials (..), TransportServerConfig (..), mkTransportServerConfig) import Simplex.Messaging.Util (eitherToMaybe, safeDecodeUtf8, tshow) import System.Directory (createDirectoryIfMissing, doesFileExist) import System.FilePath (combine) @@ -124,6 +124,10 @@ xftpServerCLI cfgPath logPath = do \disconnect: off\n" <> ("# ttl: " <> tshow (ttl defaultInactiveClientExpiration) <> "\n") <> ("# check_interval: " <> tshow (checkInterval defaultInactiveClientExpiration) <> "\n") + <> "\n\ + \[WEB]\n\ + \# cert: /etc/opt/simplex-xftp/web.crt\n\ + \# key: /etc/opt/simplex-xftp/web.key\n" runServer ini = do hSetBuffering stdout LineBuffering hSetBuffering stderr LineBuffering @@ -155,6 +159,17 @@ xftpServerCLI cfgPath logPath = do else "NOT allowed." putStrLn $ "Listening on port " <> xftpPort <> "..." 
+ httpCredentials_ = + eitherToMaybe $ do + cert <- T.unpack <$> lookupValue "WEB" "cert" ini + key <- T.unpack <$> lookupValue "WEB" "key" ini + pure + ServerCredentials + { caCertificateFile = Nothing, + certificateFile = cert, + privateKeyFile = key + } + serverConfig = XFTPServerConfig { xftpPort = T.unpack $ strictIni "TRANSPORT" "port" ini, @@ -186,6 +201,7 @@ xftpServerCLI cfgPath logPath = do privateKeyFile = c serverKeyFile, certificateFile = c serverCrtFile }, + httpCredentials = httpCredentials_, xftpServerVRange = supportedFileServerVRange, logStatsInterval = logStats $> 86400, -- seconds logStatsStartTime = 0, -- seconds from 00:00 UTC @@ -194,10 +210,12 @@ xftpServerCLI cfgPath logPath = do prometheusInterval = eitherToMaybe $ read . T.unpack <$> lookupValue "STORE_LOG" "prometheus_interval" ini, prometheusMetricsFile = combine logPath "xftp-server-metrics.txt", transportConfig = - mkTransportServerConfig - (fromMaybe False $ iniOnOff "TRANSPORT" "log_tls_errors" ini) - (Just $ alpnSupportedXFTPhandshakes <> httpALPN) - False, + let cfg = + mkTransportServerConfig + (fromMaybe False $ iniOnOff "TRANSPORT" "log_tls_errors" ini) + (Just $ alpnSupportedXFTPhandshakes <> httpALPN) + False + in cfg {addCORSHeaders = isJust httpCredentials_}, responseDelay = 0 } @@ -229,11 +247,14 @@ cliCommandP cfgPath logPath iniFile = initP :: Parser InitOptions initP = do enableStoreLog <- - flag' False + flag' + False ( long "disable-store-log" <> help "Disable store log for persistence (enabled by default)" ) - <|> flag True True + <|> flag + True + True ( long "store-log" <> short 'l' <> help "Enable store log for persistence (DEPRECATED, enabled by default)" diff --git a/src/Simplex/FileTransfer/Transport.hs b/src/Simplex/FileTransfer/Transport.hs index b7746f1cbe..bb94d55d78 100644 --- a/src/Simplex/FileTransfer/Transport.hs +++ b/src/Simplex/FileTransfer/Transport.hs @@ -19,6 +19,7 @@ module Simplex.FileTransfer.Transport -- xftpClientHandshake, XFTPServerHandshake (..), -- xftpServerHandshake, + XFTPClientHello (..), THandleXFTP, THandleParamsXFTP, VersionXFTP, @@ -60,7 +61,7 @@ import Simplex.Messaging.Parsers import Simplex.Messaging.Protocol (BlockingInfo, CommandError) import Simplex.Messaging.Transport (ALPN, CertChainPubKey, ServiceCredentials, SessionId, THandle (..), THandleParams (..), TransportError (..), TransportPeer (..)) import Simplex.Messaging.Transport.HTTP2.File -import Simplex.Messaging.Util (bshow, tshow) +import Simplex.Messaging.Util (bshow, tshow, (<$?>)) import Simplex.Messaging.Version import Simplex.Messaging.Version.Internal import System.IO (Handle, IOMode (..), withFile) @@ -111,11 +112,18 @@ alpnSupportedXFTPhandshakes = [xftpALPNv1] xftpALPNv1 :: ALPN xftpALPNv1 = "xftp/1" +data XFTPClientHello = XFTPClientHello + { -- | a random string sent by the client to the server to prove that server has identity certificate + webChallenge :: Maybe ByteString + } + data XFTPServerHandshake = XFTPServerHandshake { xftpVersionRange :: VersionRangeXFTP, sessionId :: SessionId, -- | pub key to agree shared secrets for command authorization and entity ID encryption. 
- authPubKey :: CertChainPubKey + authPubKey :: CertChainPubKey, + -- | signed identity challenge from XFTPClientHello + webIdentityProof :: Maybe C.ASignature } data XFTPClientHandshake = XFTPClientHandshake @@ -125,6 +133,14 @@ data XFTPClientHandshake = XFTPClientHandshake keyHash :: C.KeyHash } +instance Encoding XFTPClientHello where + smpEncode XFTPClientHello {webChallenge} = smpEncode webChallenge + smpP = do + webChallenge <- smpP + forM_ webChallenge $ \challenge -> unless (B.length challenge == 32) $ fail "bad XFTPClientHello webChallenge" + Tail _compat <- smpP + pure XFTPClientHello {webChallenge} + instance Encoding XFTPClientHandshake where smpEncode XFTPClientHandshake {xftpVersion, keyHash} = smpEncode (xftpVersion, keyHash) @@ -134,13 +150,13 @@ instance Encoding XFTPClientHandshake where pure XFTPClientHandshake {xftpVersion, keyHash} instance Encoding XFTPServerHandshake where - smpEncode XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey} = - smpEncode (xftpVersionRange, sessionId, authPubKey) + smpEncode XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey, webIdentityProof} = + smpEncode (xftpVersionRange, sessionId, authPubKey, C.signatureBytes webIdentityProof) smpP = do - (xftpVersionRange, sessionId) <- smpP - authPubKey <- smpP + (xftpVersionRange, sessionId, authPubKey) <- smpP + webIdentityProof <- C.decodeSignature <$?> smpP Tail _compat <- smpP - pure XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey} + pure XFTPServerHandshake {xftpVersionRange, sessionId, authPubKey, webIdentityProof} sendEncFile :: Handle -> (Builder -> IO ()) -> LC.SbState -> Word32 -> IO () sendEncFile h send = go diff --git a/src/Simplex/Messaging/Transport/HTTP2/Server.hs b/src/Simplex/Messaging/Transport/HTTP2/Server.hs index 7152eb5a9e..8ece9488b2 100644 --- a/src/Simplex/Messaging/Transport/HTTP2/Server.hs +++ b/src/Simplex/Messaging/Transport/HTTP2/Server.hs @@ -16,7 +16,7 @@ import Numeric.Natural (Natural) import Simplex.Messaging.Server.Expiration import Simplex.Messaging.Transport (ALPN, SessionId, TLS, closeConnection, tlsALPN, tlsUniq) import Simplex.Messaging.Transport.HTTP2 -import Simplex.Messaging.Transport.Server (ServerCredentials, TransportServerConfig (..), loadServerCredential, runTransportServer) +import Simplex.Messaging.Transport.Server (SNICredentialUsed, ServerCredentials, TLSServerCredential (..), TransportServerConfig (..), loadServerCredential, newSocketState, runTransportServerState_) import Simplex.Messaging.Util (threadDelay') import UnliftIO (finally) import UnliftIO.Concurrent (forkIO, killThread) @@ -54,7 +54,7 @@ getHTTP2Server HTTP2ServerConfig {qSize, http2Port, bufferSize, bodyHeadSize, se started <- newEmptyTMVarIO reqQ <- newTBQueueIO qSize action <- async $ - runHTTP2Server started http2Port bufferSize serverSupported srvCreds transportConfig Nothing (const $ pure ()) $ \sessionId sessionALPN r sendResponse -> do + runHTTP2Server started http2Port bufferSize serverSupported srvCreds Nothing transportConfig Nothing (const $ pure ()) $ \_sniUsed sessionId sessionALPN r sendResponse -> do reqBody <- getHTTP2Body r bodyHeadSize atomically $ writeTBQueue reqQ HTTP2Request {sessionId, sessionALPN, request = r, reqBody, sendResponse} void . atomically $ takeTMVar started @@ -63,24 +63,33 @@ getHTTP2Server HTTP2ServerConfig {qSize, http2Port, bufferSize, bodyHeadSize, se closeHTTP2Server :: HTTP2Server -> IO () closeHTTP2Server = uninterruptibleCancel . 
action -runHTTP2Server :: TMVar Bool -> ServiceName -> BufferSize -> T.Supported -> T.Credential -> TransportServerConfig -> Maybe ExpirationConfig -> (SessionId -> IO ()) -> HTTP2ServerFunc -> IO () -runHTTP2Server started port bufferSize srvSupported srvCreds transportConfig expCfg_ clientFinished = runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup +runHTTP2Server :: TMVar Bool -> ServiceName -> BufferSize -> T.Supported -> T.Credential -> Maybe T.Credential -> TransportServerConfig -> Maybe ExpirationConfig -> (SessionId -> IO ()) -> (SNICredentialUsed -> HTTP2ServerFunc) -> IO () +runHTTP2Server started port bufferSize srvSupported srvCreds httpCreds_ transportConfig expCfg_ clientFinished = runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup where - setup = runTransportServer started port srvSupported srvCreds transportConfig + setup handler = do + ss <- newSocketState + let combinedCreds = TLSServerCredential {credential = srvCreds, sniCredential = httpCreds_} + runTransportServerState_ ss started port srvSupported combinedCreds transportConfig $ \_ -> handler -- HTTP2 server can be run on both client and server TLS connections. runHTTP2ServerWith :: BufferSize -> ((TLS p -> IO ()) -> a) -> HTTP2ServerFunc -> a -runHTTP2ServerWith = runHTTP2ServerWith_ Nothing (\_sessId -> pure ()) +runHTTP2ServerWith bufferSize tlsSetup http2Server = + runHTTP2ServerWith_ + Nothing + (\_sessId -> pure ()) + bufferSize + (\handler -> tlsSetup $ \tls -> handler (False, tls)) + (const http2Server) -runHTTP2ServerWith_ :: Maybe ExpirationConfig -> (SessionId -> IO ()) -> BufferSize -> ((TLS p -> IO ()) -> a) -> HTTP2ServerFunc -> a -runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup http2Server = setup $ \tls -> do +runHTTP2ServerWith_ :: Maybe ExpirationConfig -> (SessionId -> IO ()) -> BufferSize -> (((SNICredentialUsed, TLS p) -> IO ()) -> a) -> (SNICredentialUsed -> HTTP2ServerFunc) -> a +runHTTP2ServerWith_ expCfg_ clientFinished bufferSize setup http2Server = setup $ \(sniUsed, tls) -> do activeAt <- newTVarIO =<< getSystemTime tid_ <- mapM (forkIO . expireInactiveClient tls activeAt) expCfg_ - withHTTP2 bufferSize (run tls activeAt) (clientFinished $ tlsUniq tls) tls `finally` mapM_ killThread tid_ + withHTTP2 bufferSize (run sniUsed tls activeAt) (clientFinished $ tlsUniq tls) tls `finally` mapM_ killThread tid_ where - run tls activeAt cfg = H.run cfg $ \req _aux sendResp -> do + run sniUsed tls activeAt cfg = H.run cfg $ \req _aux sendResp -> do getSystemTime >>= atomically . 
writeTVar activeAt - http2Server (tlsUniq tls) (tlsALPN tls) req (`sendResp` []) + http2Server sniUsed (tlsUniq tls) (tlsALPN tls) req (`sendResp` []) expireInactiveClient tls activeAt expCfg = loop where loop = do diff --git a/src/Simplex/Messaging/Transport/Server.hs b/src/Simplex/Messaging/Transport/Server.hs index 00b94ddc5b..cdfc300b71 100644 --- a/src/Simplex/Messaging/Transport/Server.hs +++ b/src/Simplex/Messaging/Transport/Server.hs @@ -11,6 +11,7 @@ module Simplex.Messaging.Transport.Server ( TransportServerConfig (..), ServerCredentials (..), TLSServerCredential (..), + SNICredentialUsed, AddHTTP, mkTransportServerConfig, runTransportServerState, @@ -62,6 +63,7 @@ data TransportServerConfig = TransportServerConfig { logTLSErrors :: Bool, serverALPN :: Maybe [ALPN], askClientCert :: Bool, + addCORSHeaders :: Bool, tlsSetupTimeout :: Int, transportTimeout :: Int } @@ -91,6 +93,7 @@ mkTransportServerConfig logTLSErrors serverALPN askClientCert = { logTLSErrors, serverALPN, askClientCert, + addCORSHeaders = False, tlsSetupTimeout = 60000000, transportTimeout = 40000000 } @@ -274,9 +277,10 @@ paramsAskClientCert clientCert params = { T.serverWantClientCert = True, T.serverHooks = (T.serverHooks params) - { T.onClientCertificate = \cc -> validateClientCertificate cc >>= \case - Just reason -> T.CertificateUsageReject reason <$ atomically (tryPutTMVar clientCert Nothing) - Nothing -> T.CertificateUsageAccept <$ atomically (tryPutTMVar clientCert $ Just cc) + { T.onClientCertificate = \cc -> + validateClientCertificate cc >>= \case + Just reason -> T.CertificateUsageReject reason <$ atomically (tryPutTMVar clientCert Nothing) + Nothing -> T.CertificateUsageAccept <$ atomically (tryPutTMVar clientCert $ Just cc) } } diff --git a/tests/Test.hs b/tests/Test.hs index 2ed0bda9e6..dcc5de3fb1 100644 --- a/tests/Test.hs +++ b/tests/Test.hs @@ -35,6 +35,7 @@ import Util import XFTPAgent import XFTPCLI import XFTPServerTests (xftpServerTests) +import XFTPWebTests (xftpWebTests) #if defined(dbPostgres) import Fixtures @@ -149,6 +150,7 @@ main = do describe "XFTP file description" fileDescriptionTests describe "XFTP CLI" xftpCLITests describe "XFTP agent" xftpAgentTests + describe "XFTP Web Client" xftpWebTests describe "XRCP" remoteControlTests describe "Server CLIs" cliTests diff --git a/tests/XFTPClient.hs b/tests/XFTPClient.hs index bd62afa068..f0d1e3a61d 100644 --- a/tests/XFTPClient.hs +++ b/tests/XFTPClient.hs @@ -15,8 +15,9 @@ import Simplex.FileTransfer.Client import Simplex.FileTransfer.Description import Simplex.FileTransfer.Server (runXFTPServerBlocking) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..), defaultFileExpiration, defaultInactiveClientExpiration) -import Simplex.FileTransfer.Transport (supportedFileServerVRange, alpnSupportedXFTPhandshakes) +import Simplex.FileTransfer.Transport (alpnSupportedXFTPhandshakes, supportedFileServerVRange) import Simplex.Messaging.Protocol (XFTPServer) +import Simplex.Messaging.Transport.HTTP2 (httpALPN) import Simplex.Messaging.Transport.Server import Test.Hspec hiding (fit, it) @@ -125,6 +126,7 @@ testXFTPServerConfig = privateKeyFile = "tests/fixtures/server.key", certificateFile = "tests/fixtures/server.crt" }, + httpCredentials = Nothing, xftpServerVRange = supportedFileServerVRange, logStatsInterval = Nothing, logStatsStartTime = 0, @@ -148,3 +150,44 @@ testXFTPClientWith cfg client = do getXFTPClient (1, testXFTPServer, Nothing) cfg [] ts (\_ -> pure ()) >>= \case Right c -> client c Left e -> error $ show e + 
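+-- SNI-based credential selection, exercised by the configs below: with
+-- httpCredentials set, the server's TLSServerCredential pair lets it present
+-- the web certificate (tests/fixtures/web.crt) to TLS clients that send an
+-- SNI hostname (e.g. browsers negotiating "h2" ALPN), and the XFTP
+-- credential to clients that omit SNI. A minimal sketch of the two
+-- client-side configs the tests use (real fields of
+-- defaultTransportClientConfig from Simplex.Messaging.Transport.Client):
+--
+-- > webCfg  = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True}
+-- > xftpCfg = defaultTransportClientConfig {clientALPN = Just ["xftp/1"], useSNI = False}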
+testXFTPServerConfigSNI :: XFTPServerConfig +testXFTPServerConfigSNI = + testXFTPServerConfig + { httpCredentials = + Just + ServerCredentials + { caCertificateFile = Nothing, + privateKeyFile = "tests/fixtures/web.key", + certificateFile = "tests/fixtures/web.crt" + }, + transportConfig = + (mkTransportServerConfig True (Just $ alpnSupportedXFTPhandshakes <> httpALPN) False) + { addCORSHeaders = True + } + } + +withXFTPServerSNI :: HasCallStack => (HasCallStack => ThreadId -> IO a) -> IO a +withXFTPServerSNI = withXFTPServerCfg testXFTPServerConfigSNI + +testXFTPServerConfigEd25519SNI :: XFTPServerConfig +testXFTPServerConfigEd25519SNI = + testXFTPServerConfig + { xftpCredentials = + ServerCredentials + { caCertificateFile = Just "tests/fixtures/ed25519/ca.crt", + privateKeyFile = "tests/fixtures/ed25519/server.key", + certificateFile = "tests/fixtures/ed25519/server.crt" + }, + httpCredentials = + Just + ServerCredentials + { caCertificateFile = Nothing, + privateKeyFile = "tests/fixtures/web.key", + certificateFile = "tests/fixtures/web.crt" + }, + transportConfig = + (mkTransportServerConfig True (Just $ alpnSupportedXFTPhandshakes <> httpALPN) False) + { addCORSHeaders = True + } + } diff --git a/tests/XFTPServerTests.hs b/tests/XFTPServerTests.hs index c1d34177f3..db1ff6bd49 100644 --- a/tests/XFTPServerTests.hs +++ b/tests/XFTPServerTests.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedLists #-} @@ -13,23 +14,37 @@ import Control.Exception (SomeException) import Control.Monad import Control.Monad.Except import Control.Monad.IO.Unlift +import qualified Crypto.PubKey.RSA as RSA import qualified Data.ByteString.Base64.URL as B64 +import Data.ByteString.Builder (byteString) import Data.ByteString.Char8 (ByteString) import qualified Data.ByteString.Char8 as B import qualified Data.ByteString.Lazy.Char8 as LB -import Data.List (isInfixOf) +import qualified Data.CaseInsensitive as CI +import Data.List (find, isInfixOf) import Data.Time.Clock (getCurrentTime) +import qualified Data.X509 as X +import Data.X509.Validation (Fingerprint (..), getFingerprint) +import Network.HPACK.Token (tokenKey) +import qualified Network.HTTP2.Client as H2 import ServerTests (logSize) import Simplex.FileTransfer.Client import Simplex.FileTransfer.Description (kb) -import Simplex.FileTransfer.Protocol (FileInfo (..), XFTPFileId) +import Simplex.FileTransfer.Protocol (FileInfo (..), XFTPFileId, xftpBlockSize) import Simplex.FileTransfer.Server.Env (XFTPServerConfig (..)) -import Simplex.FileTransfer.Transport (XFTPErrorType (..), XFTPRcvChunkSpec (..)) +import Simplex.FileTransfer.Transport (XFTPClientHandshake (..), XFTPClientHello (..), XFTPErrorType (..), XFTPRcvChunkSpec (..), XFTPServerHandshake (..), pattern VersionXFTP) import Simplex.Messaging.Client (ProtocolClientError (..)) import qualified Simplex.Messaging.Crypto as C import qualified Simplex.Messaging.Crypto.Lazy as LC +import Simplex.Messaging.Encoding (smpDecode, smpEncode) import Simplex.Messaging.Protocol (BasicAuth, EntityId (..), pattern NoEntity) import Simplex.Messaging.Server.Expiration (ExpirationConfig (..)) +import Simplex.Messaging.Transport (CertChainPubKey (..), TLS (..), TransportPeer (..), defaultSupportedParams, defaultSupportedParamsHTTPS) +import Simplex.Messaging.Transport.Client (TransportClientConfig (..), TransportHost (..), defaultTransportClientConfig, runTLSTransportClient) +import Simplex.Messaging.Transport.HTTP2 (HTTP2Body 
(..)) +import qualified Simplex.Messaging.Transport.HTTP2.Client as HC +import Simplex.Messaging.Transport.Server (loadFileFingerprint) +import Simplex.Messaging.Transport.Shared (ChainCertificates (..), chainIdCaCerts) import System.Directory (createDirectoryIfMissing, removeDirectoryRecursive, removeFile) import System.FilePath (()) import Test.Hspec hiding (fit, it) @@ -39,10 +54,8 @@ import XFTPClient xftpServerTests :: Spec xftpServerTests = - before_ (createDirectoryIfMissing False xftpServerFiles) - . after_ (removeDirectoryRecursive xftpServerFiles) - . describe "XFTP file chunk delivery" - $ do + before_ (createDirectoryIfMissing False xftpServerFiles) . after_ (removeDirectoryRecursive xftpServerFiles) $ do + describe "XFTP file chunk delivery" $ do it "should create, upload and receive file chunk (1 client)" testFileChunkDelivery it "should create, upload and receive file chunk (2 clients)" testFileChunkDelivery2 it "should create, add recipients, upload and receive file chunk" testFileChunkDeliveryAddRecipients @@ -63,6 +76,14 @@ xftpServerTests = it "allowed with correct basic auth" $ testFileBasicAuth True (Just "pwd") (Just "pwd") True it "allowed with auth on server without auth" $ testFileBasicAuth True Nothing (Just "any") True it "should not change content for uploaded and committed files" testFileSkipCommitted + describe "XFTP SNI and CORS" $ do + it "should select web certificate when SNI is used" testSNICertSelection + it "should select XFTP certificate when SNI is not used" testNoSNICertSelection + it "should add CORS headers when SNI is used" testCORSHeaders + it "should respond to OPTIONS preflight with CORS headers" testCORSPreflight + it "should not add CORS headers without SNI" testNoCORSWithoutSNI + it "should upload and receive file chunk through SNI-enabled server" testFileChunkDeliverySNI + it "should complete web handshake with challenge-response" testWebHandshake chSize :: Integral a => a chSize = kb 128 @@ -395,3 +416,127 @@ testFileSkipCommitted = uploadXFTPChunk c spKey sId chunkSpec -- upload again to get FROk without getting stuck downloadXFTPChunk g c rpKey rId $ XFTPRcvChunkSpec "tests/tmp/received_chunk" chSize digest liftIO $ B.readFile "tests/tmp/received_chunk" `shouldReturn` bytes -- new chunk content got ignored + +-- SNI and CORS tests + +lookupResponseHeader :: B.ByteString -> H2.Response -> Maybe B.ByteString +lookupResponseHeader name resp = + snd <$> find (\(t, _) -> tokenKey t == CI.mk name) (fst $ H2.responseHeaders resp) + +getCerts :: TLS 'TClient -> [X.Certificate] +getCerts tls = + let X.CertificateChain cc = tlsPeerCert tls + in map (X.signedObject . 
X.getSigned) cc + +testSNICertSelection :: Expectation +testSNICertSelection = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + tlsALPN tls `shouldBe` Just "h2" + case getCerts tls of + X.Certificate {X.certPubKey = X.PubKeyRSA rsa} : _ -> RSA.public_size rsa `shouldSatisfy` (> 0) + leaf : _ -> expectationFailure $ "Expected RSA cert, got: " <> show (X.certPubKey leaf) + [] -> expectationFailure "Empty certificate chain" + +testNoSNICertSelection :: Expectation +testNoSNICertSelection = + withXFTPServerSNI $ \_ -> do + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caXFTP = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["xftp/1"], useSNI = False} + runTLSTransportClient defaultSupportedParams Nothing cfg Nothing "localhost" xftpTestPort (Just caXFTP) $ \(tls :: TLS 'TClient) -> do + tlsALPN tls `shouldBe` Just "xftp/1" + case getCerts tls of + X.Certificate {X.certPubKey = X.PubKeyEd448 _} : _ -> pure () + leaf : _ -> expectationFailure $ "Expected Ed448 cert, got: " <> show (X.certPubKey leaf) + [] -> expectationFailure "Empty certificate chain" + +testCORSHeaders :: Expectation +testCORSHeaders = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "POST" "/" [] + HC.HTTP2Response {HC.response} <- either (error . show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Just "*" + lookupResponseHeader "access-control-expose-headers" response `shouldBe` Just "*" + +testCORSPreflight :: Expectation +testCORSPreflight = + withXFTPServerSNI $ \_ -> do + Fingerprint fpHTTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caHTTP = C.KeyHash fpHTTP + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just caHTTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "OPTIONS" "/" [] + HC.HTTP2Response {HC.response} <- either (error . 
show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Just "*" + lookupResponseHeader "access-control-allow-methods" response `shouldBe` Just "POST, OPTIONS" + lookupResponseHeader "access-control-allow-headers" response `shouldBe` Just "*" + lookupResponseHeader "access-control-max-age" response `shouldBe` Just "86400" + +testNoCORSWithoutSNI :: Expectation +testNoCORSWithoutSNI = + withXFTPServerSNI $ \_ -> do + Fingerprint fpXFTP <- loadFileFingerprint "tests/fixtures/ca.crt" + let caXFTP = C.KeyHash fpXFTP + cfg = defaultTransportClientConfig {clientALPN = Just ["xftp/1"], useSNI = False} + runTLSTransportClient defaultSupportedParams Nothing cfg Nothing "localhost" xftpTestPort (Just caXFTP) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + let req = H2.requestNoBody "POST" "/" [] + HC.HTTP2Response {HC.response} <- either (error . show) pure =<< HC.sendRequest h2 req (Just 5000000) + lookupResponseHeader "access-control-allow-origin" response `shouldBe` Nothing + +testFileChunkDeliverySNI :: Expectation +testFileChunkDeliverySNI = + withXFTPServerSNI $ \_ -> testXFTPClient $ \c -> runRight_ $ runTestFileChunkDelivery c c + +testWebHandshake :: Expectation +testWebHandshake = + withXFTPServerSNI $ \_ -> do + Fingerprint fp <- loadFileFingerprint "tests/fixtures/ca.crt" + let keyHash = C.KeyHash fp + cfg = defaultTransportClientConfig {clientALPN = Just ["h2"], useSNI = True} + runTLSTransportClient defaultSupportedParamsHTTPS Nothing cfg Nothing "localhost" xftpTestPort (Just keyHash) $ \(tls :: TLS 'TClient) -> do + let h2cfg = HC.defaultHTTP2ClientConfig {HC.bodyHeadSize = 65536} + h2 <- either (error . show) pure =<< HC.attachHTTP2Client h2cfg (THDomainName "localhost") xftpTestPort mempty 65536 tls + -- Send web challenge as XFTPClientHello + g <- C.newRandom + challenge <- atomically $ C.randomBytes 32 g + let helloBody = smpEncode (XFTPClientHello {webChallenge = Just challenge}) + helloReq = H2.requestBuilder "POST" "/" [] $ byteString helloBody + resp1 <- either (error . show) pure =<< HC.sendRequest h2 helloReq (Just 5000000) + let serverHsBody = bodyHead (HC.respBody resp1) + -- Decode server handshake + serverHsDecoded <- either (error . show) pure $ C.unPad serverHsBody + XFTPServerHandshake {sessionId, authPubKey = CertChainPubKey {certChain, signedPubKey}, webIdentityProof} <- + either error pure $ smpDecode serverHsDecoded + sig <- maybe (error "expected webIdentityProof") pure webIdentityProof + -- Verify cert chain identity + (leafCert, idCert) <- case chainIdCaCerts certChain of + CCValid {leafCert, idCert} -> pure (leafCert, idCert) + _ -> error "expected CCValid chain" + let Fingerprint idCertFP = getFingerprint idCert X.HashSHA256 + C.KeyHash idCertFP `shouldBe` keyHash + -- Verify challenge signature (identity proof) + leafPubKey <- either error pure $ C.x509ToPublic' $ X.certPubKey $ X.signedObject $ X.getSigned leafCert + C.verify leafPubKey sig (challenge <> sessionId) `shouldBe` True + -- Verify signedPubKey (DH key auth) + void $ either error pure $ C.verifyX509 leafPubKey signedPubKey + -- Send client handshake with echoed challenge + let clientHs = XFTPClientHandshake {xftpVersion = VersionXFTP 1, keyHash} + clientHsPadded <- either (error . 
show) pure $ C.pad (smpEncode clientHs) xftpBlockSize + let clientHsReq = H2.requestBuilder "POST" "/" [] $ byteString clientHsPadded + resp2 <- either (error . show) pure =<< HC.sendRequest h2 clientHsReq (Just 5000000) + let ackBody = bodyHead (HC.respBody resp2) + B.length ackBody `shouldBe` 0 diff --git a/tests/XFTPWebTests.hs b/tests/XFTPWebTests.hs new file mode 100644 index 0000000000..bc68cfe285 --- /dev/null +++ b/tests/XFTPWebTests.hs @@ -0,0 +1,3214 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-- | Per-function tests for the xftp-web TypeScript XFTP client library. +-- Each test calls the Haskell function and the corresponding TypeScript function +-- via node, then asserts byte-identical output. +-- +-- Prerequisites: cd xftp-web && npm install && npm run build +-- Run: cabal test --test-option=--match="/XFTP Web Client/" +module XFTPWebTests (xftpWebTests) where + +import Control.Concurrent (forkIO, newEmptyMVar, putMVar, takeMVar) +import Control.Monad (replicateM, when) +import Crypto.Error (throwCryptoError) +import qualified Crypto.PubKey.Curve25519 as X25519 +import qualified Crypto.PubKey.Ed25519 as Ed25519 +import qualified Data.ByteArray as BA +import qualified Data.ByteString as B +import qualified Data.ByteString.Lazy as LB +import Data.Int (Int64) +import Data.List (intercalate) +import qualified Data.List.NonEmpty as NE +import Data.Word (Word8, Word16, Word32) +import System.Random (randomIO) +import Data.X509.Validation (Fingerprint (..)) +import Simplex.FileTransfer.Client (prepareChunkSizes) +import Simplex.FileTransfer.Description (FileDescription (..), FileSize (..), ValidFileDescription, pattern ValidFileDescription) +import Simplex.FileTransfer.Protocol (FileParty (..)) +import Simplex.FileTransfer.Transport (XFTPClientHello (..)) +import Simplex.FileTransfer.Types (FileHeader (..)) +import qualified Simplex.Messaging.Crypto as C +import qualified Simplex.Messaging.Crypto.Lazy as LC +import Simplex.Messaging.Encoding +import Simplex.Messaging.Encoding.String (strDecode, strEncode) +import Simplex.Messaging.Transport.Server (loadFileFingerprint) +import System.Directory (createDirectoryIfMissing, doesDirectoryExist, removeDirectoryRecursive) +import System.Environment (getEnvironment) +import System.Exit (ExitCode (..)) +import System.Process (CreateProcess (..), StdStream (..), createProcess, proc, waitForProcess) +import Test.Hspec hiding (fit, it) +import Util +import Simplex.FileTransfer.Server.Env (XFTPServerConfig) +import XFTPClient (testXFTPServerConfigEd25519SNI, testXFTPServerConfigSNI, withXFTPServerCfg, xftpTestPort) +import AgentTests.FunctionalAPITests (rfGet, runRight, runRight_, sfGet, withAgent) +import Simplex.Messaging.Agent (AgentClient, xftpReceiveFile, xftpSendFile, xftpStartWorkers) +import Simplex.Messaging.Agent.Protocol (AEvent (..)) +import SMPAgentClient (agentCfg, initAgentServers, testDB) +import XFTPCLI (recipientFiles, senderFiles) +import qualified Simplex.Messaging.Crypto.File as CF + +xftpWebDir :: FilePath +xftpWebDir = "xftp-web" + +-- | Run an inline ES module script via node, return stdout as ByteString. 
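+--
+-- Usage sketch, mirroring the first encoding test below ('impEnc' and
+-- 'jsOut' are the script helpers defined in this module):
+--
+-- > actual <- callNode $ impEnc <> jsOut "E.encodeWord16(42)"
+-- > actual `shouldBe` smpEncode (42 :: Word16)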
+callNode :: String -> IO B.ByteString +callNode script = do + baseEnv <- getEnvironment + let nodeEnv = ("NODE_TLS_REJECT_UNAUTHORIZED", "0") : baseEnv + (_, Just hout, Just herr, ph) <- + createProcess + (proc "node" ["--input-type=module", "-e", script]) + { std_out = CreatePipe, + std_err = CreatePipe, + cwd = Just xftpWebDir, + env = Just nodeEnv + } + errVar <- newEmptyMVar + _ <- forkIO $ B.hGetContents herr >>= putMVar errVar + out <- B.hGetContents hout + err <- takeMVar errVar + ec <- waitForProcess ph + when (ec /= ExitSuccess) $ + expectationFailure $ + "node " <> show ec <> "\nstderr: " <> map (toEnum . fromIntegral) (B.unpack err) + pure out + +-- | Format a ByteString as a JS Uint8Array constructor. +jsUint8 :: B.ByteString -> String +jsUint8 bs = "new Uint8Array([" <> intercalate "," (map show (B.unpack bs)) <> "])" + +-- Import helpers for inline scripts. +impEnc, impPad, impDig, impKey, impSb :: String +impEnc = "import * as E from './dist/protocol/encoding.js';" +impPad = "import * as P from './dist/crypto/padding.js';" +impDig = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as D from './dist/crypto/digest.js';" + <> "await sodium.ready;" +impKey = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "await sodium.ready;" +impSb = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as S from './dist/crypto/secretbox.js';" + <> "await sodium.ready;" +impFile :: String +impFile = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as F from './dist/crypto/file.js';" + <> "await sodium.ready;" +impCmd :: String +impCmd = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as Cmd from './dist/protocol/commands.js';" + <> "await sodium.ready;" +impTx :: String +impTx = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Tx from './dist/protocol/transmission.js';" + <> "await sodium.ready;" +impHs :: String +impHs = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Hs from './dist/protocol/handshake.js';" + <> "await sodium.ready;" +impId :: String +impId = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as E from './dist/protocol/encoding.js';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Id from './dist/crypto/identity.js';" + <> "await sodium.ready;" +impDesc :: String +impDesc = "import * as Desc from './dist/protocol/description.js';" +impChk :: String +impChk = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as Desc from './dist/protocol/description.js';" + <> "import * as Chk from './dist/protocol/chunks.js';" + <> "await sodium.ready;" +impCli :: String +impCli = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as Cli from './dist/protocol/client.js';" + <> "await sodium.ready;" +impDl :: String +impDl = + "import sodium from 'libsodium-wrappers-sumo';" + <> "import * as K from './dist/crypto/keys.js';" + <> "import * as F from './dist/crypto/file.js';" + <> "import * as Cli from './dist/protocol/client.js';" + <> "import * as Dl from './dist/download.js';" + <> "import * as Cmd from './dist/protocol/commands.js';" + <> "import * as Tx 
from './dist/protocol/transmission.js';" + <> "await sodium.ready;" + +impAddr :: String +impAddr = "import * as Addr from './dist/protocol/address.js';" + +-- | Wrap expression in process.stdout.write(Buffer.from(...)). +jsOut :: String -> String +jsOut expr = "process.stdout.write(Buffer.from(" <> expr <> "));" + +xftpWebTests :: Spec +xftpWebTests = do + distExists <- runIO $ doesDirectoryExist (xftpWebDir <> "/dist") + if distExists + then do + tsEncodingTests + tsPaddingTests + tsDigestTests + tsKeyTests + tsSecretboxTests + tsFileCryptoTests + tsCommandTests + tsTransmissionTests + tsHandshakeTests + tsIdentityTests + tsDescriptionTests + tsChunkTests + tsClientTests + tsDownloadTests + tsAddressTests + tsIntegrationTests + else + it "skipped (run 'cd xftp-web && npm install && npm run build' first)" $ + pendingWith "TS project not compiled" + +-- ── protocol/encoding ────────────────────────────────────────────── + +tsEncodingTests :: Spec +tsEncodingTests = describe "protocol/encoding" $ do + describe "encode" $ do + it "encodeWord16" $ do + let val = 42 :: Word16 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord16(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeWord16 max" $ do + let val = 65535 :: Word16 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord16(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeWord32" $ do + let val = 100000 :: Word32 + actual <- callNode $ impEnc <> jsOut ("E.encodeWord32(" <> show val <> ")") + actual `shouldBe` smpEncode val + + it "encodeInt64" $ do + let val = 1234567890123456789 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeInt64 negative" $ do + let val = -42 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeInt64 zero" $ do + let val = 0 :: Int64 + actual <- callNode $ impEnc <> jsOut ("E.encodeInt64(" <> show val <> "n)") + actual `shouldBe` smpEncode val + + it "encodeBytes" $ do + let val = "hello" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeBytes(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode val + + it "encodeBytes empty" $ do + let val = "" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeBytes(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode val + + it "encodeLarge" $ do + let val = "test data for large encoding" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeLarge(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode (Large val) + + it "encodeTail" $ do + let val = "raw tail bytes" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeTail(" <> jsUint8 val <> ")") + actual `shouldBe` smpEncode (Tail val) + + it "encodeBool True" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeBool(true)" + actual `shouldBe` smpEncode True + + it "encodeBool False" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeBool(false)" + actual `shouldBe` smpEncode False + + it "encodeString" $ do + let val = "hello" :: String + actual <- callNode $ impEnc <> jsOut "E.encodeString('hello')" + actual `shouldBe` smpEncode val + + it "encodeMaybe Nothing" $ do + actual <- callNode $ impEnc <> jsOut "E.encodeMaybe(E.encodeBytes, null)" + actual `shouldBe` smpEncode (Nothing :: Maybe B.ByteString) + + it "encodeMaybe Just" $ do + let val = "hello" :: B.ByteString + actual <- callNode $ impEnc <> jsOut ("E.encodeMaybe(E.encodeBytes, " <> jsUint8 val <> ")") + 
actual `shouldBe` smpEncode (Just val) + + it "encodeList" $ do + let vals = ["ab", "cd", "ef"] :: [B.ByteString] + actual <- + callNode $ + impEnc + <> "const xs = [" + <> intercalate "," (map jsUint8 vals) + <> "];" + <> jsOut "E.encodeList(E.encodeBytes, xs)" + actual `shouldBe` smpEncodeList vals + + it "encodeList empty" $ do + let vals = [] :: [B.ByteString] + actual <- + callNode $ + impEnc <> jsOut "E.encodeList(E.encodeBytes, [])" + actual `shouldBe` smpEncodeList vals + + it "encodeNonEmpty" $ do + let vals = ["ab", "cd"] :: [B.ByteString] + actual <- + callNode $ + impEnc + <> "const xs = [" + <> intercalate "," (map jsUint8 vals) + <> "];" + <> jsOut "E.encodeNonEmpty(E.encodeBytes, xs)" + actual `shouldBe` smpEncode (NE.fromList vals) + + describe "decode round-trips" $ do + it "decodeWord16" $ do + let encoded = smpEncode (42 :: Word16) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeWord16(E.decodeWord16(d))" + actual `shouldBe` encoded + + it "decodeWord32" $ do + let encoded = smpEncode (100000 :: Word32) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeWord32(E.decodeWord32(d))" + actual `shouldBe` encoded + + it "decodeInt64" $ do + let encoded = smpEncode (1234567890123456789 :: Int64) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeInt64(E.decodeInt64(d))" + actual `shouldBe` encoded + + it "decodeInt64 negative" $ do + let encoded = smpEncode (-42 :: Int64) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeInt64(E.decodeInt64(d))" + actual `shouldBe` encoded + + it "decodeBytes" $ do + let encoded = smpEncode ("hello" :: B.ByteString) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeBytes(E.decodeBytes(d))" + actual `shouldBe` encoded + + it "decodeLarge" $ do + let encoded = smpEncode (Large "large data") + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeLarge(E.decodeLarge(d))" + actual `shouldBe` encoded + + it "decodeBool" $ do + let encoded = smpEncode True + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeBool(E.decodeBool(d))" + actual `shouldBe` encoded + + it "decodeString" $ do + let encoded = smpEncode ("hello" :: String) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeString(E.decodeString(d))" + actual `shouldBe` encoded + + it "decodeMaybe Just" $ do + let encoded = smpEncode (Just ("hello" :: B.ByteString)) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeMaybe(E.encodeBytes, E.decodeMaybe(E.decodeBytes, d))" + actual `shouldBe` encoded + + it "decodeMaybe Nothing" $ do + let encoded = smpEncode (Nothing :: Maybe B.ByteString) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeMaybe(E.encodeBytes, E.decodeMaybe(E.decodeBytes, d))" + actual `shouldBe` encoded + + it "decodeList" $ do + let encoded = smpEncodeList (["ab", "cd", "ef"] :: [B.ByteString]) + actual <- + callNode $ + impEnc + <> "const d = new E.Decoder(" + <> jsUint8 encoded + <> ");" + <> jsOut "E.encodeList(E.encodeBytes, 
E.decodeList(E.decodeBytes, d))" + actual `shouldBe` encoded + +-- ── crypto/padding ───────────────────────────────────────────────── + +tsPaddingTests :: Spec +tsPaddingTests = describe "crypto/padding" $ do + it "pad" $ do + let msg = "hello" :: B.ByteString + paddedLen = 256 :: Int + expected = either (error . show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + actual `shouldBe` expected + + it "pad minimal" $ do + let msg = "ab" :: B.ByteString + paddedLen = 16 :: Int + expected = either (error . show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + actual `shouldBe` expected + + it "Haskell pad -> TS unPad" $ do + let msg = "cross-language test" :: B.ByteString + paddedLen = 128 :: Int + padded = either (error . show) id $ C.pad msg paddedLen + actual <- callNode $ impPad <> jsOut ("P.unPad(" <> jsUint8 padded <> ")") + actual `shouldBe` msg + + it "TS pad -> Haskell unPad" $ do + let msg = "ts to haskell" :: B.ByteString + paddedLen = 64 :: Int + tsPadded <- callNode $ impPad <> jsOut ("P.pad(" <> jsUint8 msg <> ", " <> show paddedLen <> ")") + let actual = either (error . show) id $ C.unPad tsPadded + actual `shouldBe` msg + + it "padLazy" $ do + let msg = "hello" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + expected = either (error . show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- + callNode $ + impPad <> jsOut ("P.padLazy(" <> jsUint8 msg <> ", " <> show msgLen <> "n, " <> show paddedLen <> "n)") + actual `shouldBe` LB.toStrict expected + + it "Haskell padLazy -> TS unPadLazy" $ do + let msg = "cross-language lazy" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + padded = either (error . show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- callNode $ impPad <> jsOut ("P.unPadLazy(" <> jsUint8 (LB.toStrict padded) <> ")") + actual `shouldBe` msg + + it "TS padLazy -> Haskell unPadLazy" $ do + let msg = "ts to haskell lazy" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 128 :: Int64 + tsPadded <- + callNode $ + impPad <> jsOut ("P.padLazy(" <> jsUint8 msg <> ", " <> show msgLen <> "n, " <> show paddedLen <> "n)") + let actual = either (error . show) id $ LC.unPad (LB.fromStrict tsPadded) + actual `shouldBe` LB.fromStrict msg + + it "splitLen" $ do + let msg = "test content" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + padded = either (error . show) id $ LC.pad (LB.fromStrict msg) msgLen paddedLen + actual <- + callNode $ + impEnc + <> impPad + <> "const r = P.splitLen(" + <> jsUint8 (LB.toStrict padded) + <> ");" + <> "const len = E.encodeInt64(r.len);" + <> jsOut "E.concatBytes(len, r.content)" + let (expectedLen, expectedContent) = either (error . 
show) id $ LC.splitLen padded + expectedBytes = smpEncode expectedLen <> LB.toStrict expectedContent + actual `shouldBe` expectedBytes + +-- ── crypto/digest ────────────────────────────────────────────────── + +tsDigestTests :: Spec +tsDigestTests = describe "crypto/digest" $ do + it "sha256" $ do + let input = "hello world" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + + it "sha256 empty" $ do + let input = "" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + + it "sha512" $ do + let input = "hello world" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha512(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha512Hash input + + it "sha512 empty" $ do + let input = "" :: B.ByteString + actual <- callNode $ impDig <> jsOut ("D.sha512(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha512Hash input + + it "sha256 binary" $ do + let input = B.pack [0, 1, 2, 255, 254, 128] + actual <- callNode $ impDig <> jsOut ("D.sha256(" <> jsUint8 input <> ")") + actual `shouldBe` C.sha256Hash input + +-- ── crypto/keys ────────────────────────────────────────────────── + +tsKeyTests :: Spec +tsKeyTests = describe "crypto/keys" $ do + describe "DER encoding" $ do + it "encodePubKeyEd25519" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + expectedDer = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.encodePubKeyEd25519(" <> jsUint8 rawPub <> ")") + actual `shouldBe` expectedDer + + it "decodePubKeyEd25519" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + der = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.decodePubKeyEd25519(" <> jsUint8 der <> ")") + actual `shouldBe` rawPub + + it "encodePubKeyX25519" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + expectedDer = derPrefix <> rawPub + actual <- callNode $ impKey <> jsOut ("K.encodePubKeyX25519(" <> jsUint8 rawPub <> ")") + actual `shouldBe` expectedDer + + it "encodePrivKeyEd25519" $ do + let seed = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x04, 0x22, 0x04, 0x20] + expectedDer = derPrefix <> seed + actual <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut "K.encodePrivKeyEd25519(kp.privateKey)" + actual `shouldBe` expectedDer + + it "encodePrivKeyX25519" $ do + let rawPriv = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x04, 0x22, 0x04, 0x20] + expectedDer = derPrefix <> rawPriv + actual <- callNode $ impKey <> jsOut ("K.encodePrivKeyX25519(" <> jsUint8 rawPriv <> ")") + actual `shouldBe` expectedDer + + it "DER round-trip Ed25519 pubkey" $ do + actual <- + callNode $ + impKey + <> "const kp = K.generateEd25519KeyPair();" + <> "const der = K.encodePubKeyEd25519(kp.publicKey);" + <> "const decoded = K.decodePubKeyEd25519(der);" + <> "const match = decoded.length === kp.publicKey.length && decoded.every((b,i) => b === kp.publicKey[i]);" + <> jsOut "new Uint8Array([match ? 1 : 0])" + actual `shouldBe` B.pack [1] + + it "encodePubKeyEd25519 matches Haskell" $ do + let seed = B.pack [1 .. 
32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + rawPub = BA.convert pk :: B.ByteString + haskellDer = C.encodePubKey (C.PublicKeyEd25519 pk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePubKeyEd25519(" <> jsUint8 rawPub <> ")") + tsDer `shouldBe` haskellDer + + it "encodePubKeyX25519 matches Haskell" $ do + let rawPriv = B.pack [1 .. 32] + sk = throwCryptoError $ X25519.secretKey rawPriv + pk = X25519.toPublic sk + rawPub = BA.convert pk :: B.ByteString + haskellDer = C.encodePubKey (C.PublicKeyX25519 pk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePubKeyX25519(" <> jsUint8 rawPub <> ")") + tsDer `shouldBe` haskellDer + + it "encodePrivKeyEd25519 matches Haskell" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + haskellDer = C.encodePrivKey (C.PrivateKeyEd25519 sk) + tsDer <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut "K.encodePrivKeyEd25519(kp.privateKey)" + tsDer `shouldBe` haskellDer + + it "encodePrivKeyX25519 matches Haskell" $ do + let rawPriv = B.pack [1 .. 32] + sk = throwCryptoError $ X25519.secretKey rawPriv + haskellDer = C.encodePrivKey (C.PrivateKeyX25519 sk) + tsDer <- callNode $ impKey <> jsOut ("K.encodePrivKeyX25519(" <> jsUint8 rawPriv <> ")") + tsDer `shouldBe` haskellDer + + describe "Ed25519 sign/verify" $ do + it "sign determinism" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "deterministic test" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut ("K.sign(kp.privateKey, " <> jsUint8 msg <> ")") + actual `shouldBe` rawSig + + it "Haskell sign -> TS verify" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "cross-language sign test" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawPub = BA.convert pk :: B.ByteString + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const ok = K.verify(" + <> jsUint8 rawPub + <> ", " + <> jsUint8 rawSig + <> ", " + <> jsUint8 msg + <> ");" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + actual `shouldBe` B.pack [1] + + it "TS sign -> Haskell verify" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "ts-to-haskell sign" :: B.ByteString + rawSig <- + callNode $ + impKey + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut ("K.sign(kp.privateKey, " <> jsUint8 msg <> ")") + let sig = throwCryptoError $ Ed25519.signature rawSig + Ed25519.verify pk msg sig `shouldBe` True + + it "verify rejects wrong message" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + msg = "original message" :: B.ByteString + wrongMsg = "wrong message" :: B.ByteString + sig = Ed25519.sign sk pk msg + rawPub = BA.convert pk :: B.ByteString + rawSig = BA.convert sig :: B.ByteString + actual <- + callNode $ + impKey + <> "const ok = K.verify(" + <> jsUint8 rawPub + <> ", " + <> jsUint8 rawSig + <> ", " + <> jsUint8 wrongMsg + <> ");" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + actual `shouldBe` B.pack [0] + + describe "X25519 DH" $ do + it "DH cross-language" $ do + let seed1 = B.pack [1 .. 32] + seed2 = B.pack [33 .. 
64] + sk1 = throwCryptoError $ X25519.secretKey seed1 + sk2 = throwCryptoError $ X25519.secretKey seed2 + pk2 = X25519.toPublic sk2 + dhHs = X25519.dh pk2 sk1 + rawPk2 = BA.convert pk2 :: B.ByteString + rawDh = BA.convert dhHs :: B.ByteString + actual <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk2 <> ", " <> jsUint8 seed1 <> ")") + actual `shouldBe` rawDh + + it "DH commutativity" $ do + let seed1 = B.pack [1 .. 32] + seed2 = B.pack [33 .. 64] + sk1 = throwCryptoError $ X25519.secretKey seed1 + pk1 = X25519.toPublic sk1 + sk2 = throwCryptoError $ X25519.secretKey seed2 + pk2 = X25519.toPublic sk2 + rawPk1 = BA.convert pk1 :: B.ByteString + rawPk2 = BA.convert pk2 :: B.ByteString + dh1 <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk2 <> ", " <> jsUint8 seed1 <> ")") + dh2 <- + callNode $ + impKey <> jsOut ("K.dh(" <> jsUint8 rawPk1 <> ", " <> jsUint8 seed2 <> ")") + dh1 `shouldBe` dh2 + + describe "keyHash" $ do + it "keyHash matches Haskell sha256Hash of DER" $ do + let rawPub = B.pack [1 .. 32] + derPrefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] + der = derPrefix <> rawPub + expectedHash = C.sha256Hash der + actual <- + callNode $ + impKey + <> "const der = K.encodePubKeyEd25519(" + <> jsUint8 rawPub + <> ");" + <> jsOut "K.keyHash(der)" + actual `shouldBe` expectedHash + +-- ── crypto/secretbox ────────────────────────────────────────────── + +tsSecretboxTests :: Spec +tsSecretboxTests = describe "crypto/secretbox" $ do + let key32 = B.pack [1 .. 32] + nonce24 = B.pack [1 .. 24] + cbNonceVal = C.cbNonce nonce24 + sbKeyVal = C.unsafeSbKey key32 + + describe "NaCl secretbox (tag prepended)" $ do + it "cbEncrypt matches Haskell sbEncrypt_" $ do + let msg = "hello NaCl secretbox" :: B.ByteString + paddedLen = 256 :: Int + hsResult = either (error . show) id $ C.sbEncrypt_ key32 cbNonceVal msg paddedLen + tsResult <- + callNode $ + impSb <> jsOut ("S.cbEncrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 msg <> "," <> show paddedLen <> ")") + tsResult `shouldBe` hsResult + + it "Haskell sbEncrypt_ -> TS cbDecrypt" $ do + let msg = "cross-language decrypt" :: B.ByteString + paddedLen = 128 :: Int + cipher = either (error . show) id $ C.sbEncrypt_ key32 cbNonceVal msg paddedLen + tsResult <- + callNode $ + impSb <> jsOut ("S.cbDecrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 cipher <> ")") + tsResult `shouldBe` msg + + it "TS cbEncrypt -> Haskell sbDecrypt_" $ do + let msg = "ts-to-haskell NaCl" :: B.ByteString + paddedLen = 64 :: Int + tsCipher <- + callNode $ + impSb <> jsOut ("S.cbEncrypt(" <> jsUint8 key32 <> "," <> jsUint8 nonce24 <> "," <> jsUint8 msg <> "," <> show paddedLen <> ")") + let hsResult = either (error . show) id $ C.sbDecrypt_ key32 cbNonceVal tsCipher + hsResult `shouldBe` msg + + describe "streaming tail-tag" $ do + it "sbEncryptTailTag matches Haskell" $ do + let msg = "hello streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + hsResult = + either (error . 
show) id $ + LC.sbEncryptTailTag sbKeyVal cbNonceVal (LB.fromStrict msg) msgLen paddedLen + tsResult <- + callNode $ + impSb + <> jsOut + ( "S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n)" + ) + tsResult `shouldBe` LB.toStrict hsResult + + it "Haskell encrypt -> TS decrypt (tail tag)" $ do + let msg = "haskell-to-ts streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 128 :: Int64 + cipher = + either (error . show) id $ + LC.sbEncryptTailTag sbKeyVal cbNonceVal (LB.fromStrict msg) msgLen paddedLen + tsResult <- + callNode $ + impSb + <> "const r = S.sbDecryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show paddedLen + <> "n," + <> jsUint8 (LB.toStrict cipher) + <> ");" + <> jsOut "new Uint8Array([r.valid ? 1 : 0, ...r.content])" + let (validByte, content) = B.splitAt 1 tsResult + validByte `shouldBe` B.pack [1] + content `shouldBe` msg + + it "TS encrypt -> Haskell decrypt (tail tag)" $ do + let msg = "ts-to-haskell streaming" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsCipher <- + callNode $ + impSb + <> jsOut + ( "S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n)" + ) + let (valid, plaintext) = + either (error . show) id $ + LC.sbDecryptTailTag sbKeyVal cbNonceVal paddedLen (LB.fromStrict tsCipher) + valid `shouldBe` True + LB.toStrict plaintext `shouldBe` msg + + it "tag tampering detection" $ do + let msg = "tamper test" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsResult <- + callNode $ + impSb + <> "const enc = S.sbEncryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> "," + <> show msgLen + <> "n," + <> show paddedLen + <> "n);" + <> "enc[enc.length - 1] ^= 1;" + <> "const r = S.sbDecryptTailTag(" + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show paddedLen + <> "n, enc);" + <> jsOut "new Uint8Array([r.valid ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + describe "internal consistency" $ do + it "streaming matches NaCl secretbox (TS-only)" $ do + let msg = "salsa20 validation" :: B.ByteString + msgLen = fromIntegral (B.length msg) :: Int64 + paddedLen = 64 :: Int64 + tsResult <- + callNode $ + impPad + <> impSb + <> "const msg = " + <> jsUint8 msg + <> ";" + <> "const key = " + <> jsUint8 key32 + <> ";" + <> "const nonce = " + <> jsUint8 nonce24 + <> ";" + <> "const padded = P.padLazy(msg, " + <> show msgLen + <> "n, " + <> show paddedLen + <> "n);" + <> "const nacl = S.cryptoBox(key, nonce, padded);" + <> "const stream = S.sbEncryptTailTag(key, nonce, msg, " + <> show msgLen + <> "n, " + <> show paddedLen + <> "n);" + <> "const naclTag = nacl.subarray(0, 16);" + <> "const naclCipher = nacl.subarray(16);" + <> "const streamCipher = stream.subarray(0, " + <> show paddedLen + <> ");" + <> "const streamTag = stream.subarray(" + <> show paddedLen + <> ");" + <> "const cipherMatch = naclCipher.length === streamCipher.length && naclCipher.every((b,i) => b === streamCipher[i]);" + <> "const tagMatch = naclTag.length === streamTag.length && naclTag.every((b,i) => b === streamTag[i]);" + <> jsOut "new Uint8Array([cipherMatch ? 1 : 0, tagMatch ? 
1 : 0])" + tsResult `shouldBe` B.pack [1, 1] + + it "multi-chunk matches single-shot (TS-only)" $ do + let msg = B.pack [1 .. 200] + tsResult <- + callNode $ + impSb + <> "const key = " + <> jsUint8 key32 + <> ";" + <> "const nonce = " + <> jsUint8 nonce24 + <> ";" + <> "const msg = " + <> jsUint8 msg + <> ";" + <> "const st1 = S.sbInit(key, nonce);" + <> "const c1 = S.sbEncryptChunk(st1, msg);" + <> "const t1 = S.sbAuth(st1);" + <> "const st2 = S.sbInit(key, nonce);" + <> "const parts = [msg.subarray(0,50), msg.subarray(50,100), msg.subarray(100,150), msg.subarray(150)];" + <> "const c2parts = parts.map(p => S.sbEncryptChunk(st2, p));" + <> "const c2 = new Uint8Array(200); let off = 0; c2parts.forEach(p => { c2.set(p, off); off += p.length; });" + <> "const t2 = S.sbAuth(st2);" + <> "const cipherMatch = c1.length === c2.length && c1.every((b,i) => b === c2[i]);" + <> "const tagMatch = t1.length === t2.length && t1.every((b,i) => b === t2[i]);" + <> jsOut "new Uint8Array([cipherMatch ? 1 : 0, tagMatch ? 1 : 0])" + tsResult `shouldBe` B.pack [1, 1] + +-- ── crypto/file ───────────────────────────────────────────────── + +tsFileCryptoTests :: Spec +tsFileCryptoTests = describe "crypto/file" $ do + let key32 = B.pack [1 .. 32] + nonce24 = B.pack [1 .. 24] + cbNonceVal = C.cbNonce nonce24 + sbKeyVal = C.unsafeSbKey key32 + + describe "FileHeader encoding" $ do + it "encodeFileHeader matches Haskell" $ do + let hdr = FileHeader "test.txt" Nothing + hsEncoded = smpEncode hdr + tsEncoded <- callNode $ impFile <> jsOut "F.encodeFileHeader({fileName: 'test.txt', fileExtra: null})" + tsEncoded `shouldBe` hsEncoded + + it "encodeFileHeader with fileExtra" $ do + let hdr = FileHeader "document.pdf" (Just "v2") + hsEncoded = smpEncode hdr + tsEncoded <- callNode $ impFile <> jsOut "F.encodeFileHeader({fileName: 'document.pdf', fileExtra: 'v2'})" + tsEncoded `shouldBe` hsEncoded + + it "Haskell encode -> TS parseFileHeader" $ do + let hdr = FileHeader "photo.jpg" (Just "extra") + encoded = smpEncode hdr + trailing = B.pack [10, 20, 30, 40, 50] + input = encoded <> trailing + tsResult <- + callNode $ + impFile + <> "const r = F.parseFileHeader(" + <> jsUint8 input + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.rest])" + tsResult `shouldBe` input + + describe "file encryption" $ do + it "encryptFile matches Haskell" $ do + let source = "Hello, this is test file content!" :: B.ByteString + hdr = FileHeader "test.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + hsEncrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsEncrypted <- + callNode $ + impFile + <> "const source = " + <> jsUint8 source + <> ";" + <> "const fileHdr = F.encodeFileHeader({fileName: 'test.txt', fileExtra: null});" + <> jsOut + ( "F.encryptFile(source, fileHdr, " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show fileSize' + <> "n," + <> show encSize + <> "n)" + ) + tsEncrypted `shouldBe` hsEncrypted + + it "Haskell encrypt -> TS decryptChunks" $ do + let source = "cross-language file test data" :: B.ByteString + hdr = FileHeader "data.bin" (Just "meta") + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsResult <- + callNode $ + impFile + <> "const r = F.decryptChunks(" + <> show encSize + <> "n, [" + <> jsUint8 encrypted + <> "], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "TS encryptFile -> Haskell decrypt" $ do + let source = "ts-to-haskell file" :: B.ByteString + hdr = FileHeader "note.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + paddedLen = encSize - 16 + tsEncrypted <- + callNode $ + impFile + <> "const source = " + <> jsUint8 source + <> ";" + <> "const fileHdr = F.encodeFileHeader({fileName: 'note.txt', fileExtra: null});" + <> jsOut + ( "F.encryptFile(source, fileHdr, " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "," + <> show fileSize' + <> "n," + <> show encSize + <> "n)" + ) + let (valid, plaintext) = + either (error . show) id $ + LC.sbDecryptTailTag sbKeyVal cbNonceVal paddedLen (LB.fromStrict tsEncrypted) + valid `shouldBe` True + LB.toStrict plaintext `shouldBe` (fileHdr <> source) + + it "multi-chunk decrypt" $ do + let source = "multi-chunk file content" :: B.ByteString + hdr = FileHeader "multi.bin" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . 
show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + (chunk1, rest) = B.splitAt 50 encrypted + (chunk2, chunk3) = B.splitAt 50 rest + tsResult <- + callNode $ + impFile + <> "const r = F.decryptChunks(" + <> show encSize + <> "n, [" + <> jsUint8 chunk1 + <> "," + <> jsUint8 chunk2 + <> "," + <> jsUint8 chunk3 + <> "], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "auth tag tampering detection" $ do + let source = "tamper detection file" :: B.ByteString + hdr = FileHeader "secret.dat" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 128 :: Int64 + sb = either (error . show) id $ LC.sbInit sbKeyVal cbNonceVal + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + tag = BA.convert (LC.sbAuth sb3) :: B.ByteString + encrypted = B.concat [hdrEnc, srcEnc, padEnc, tag] + tsResult <- + callNode $ + impFile + <> "const enc = " + <> jsUint8 encrypted + <> ";" + <> "enc[enc.length - 1] ^= 1;" + <> "let ok = 0;" + <> "try { F.decryptChunks(" + <> show encSize + <> "n, [enc], " + <> jsUint8 key32 + <> "," + <> jsUint8 nonce24 + <> "); ok = 1; } catch(e) { ok = 0; }" + <> jsOut "new Uint8Array([ok])" + tsResult `shouldBe` B.pack [0] + +-- ── protocol/commands ──────────────────────────────────────────── + +tsCommandTests :: Spec +tsCommandTests = describe "protocol/commands" $ do + let sndKey = B.pack [1 .. 8] + rcvKey1 = B.pack [11 .. 18] + rcvKey2 = B.pack [21 .. 28] + digest = B.pack [31 .. 38] + size32 = 12345 :: Word32 + authKey = B.pack [41 .. 48] + dhKey = B.pack [51 .. 
58] + + describe "encode" $ do + it "encodeFileInfo" $ do + let expected = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> jsOut "Cmd.encodeFileInfo(fi)" + tsResult `shouldBe` expected + + it "encodeFNEW with auth" $ do + let fileInfo = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + rcvKeys = smpEncodeList [rcvKey1, rcvKey2] + auth = B.singleton 0x31 <> smpEncode authKey + expected = "FNEW " <> fileInfo <> rcvKeys <> auth + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> "const rks = [" + <> jsUint8 rcvKey1 + <> "," + <> jsUint8 rcvKey2 + <> "];" + <> jsOut ("Cmd.encodeFNEW(fi, rks, " <> jsUint8 authKey <> ")") + tsResult `shouldBe` expected + + it "encodeFNEW without auth" $ do + let fileInfo = smpEncode sndKey <> smpEncode size32 <> smpEncode digest + rcvKeys = smpEncodeList [rcvKey1] + expected = "FNEW " <> fileInfo <> rcvKeys <> "0" + tsResult <- + callNode $ + impCmd + <> "const fi = {sndKey: " + <> jsUint8 sndKey + <> ", size: " + <> show size32 + <> ", digest: " + <> jsUint8 digest + <> "};" + <> "const rks = [" + <> jsUint8 rcvKey1 + <> "];" + <> jsOut "Cmd.encodeFNEW(fi, rks, null)" + tsResult `shouldBe` expected + + it "encodeFADD" $ do + let expected = "FADD " <> smpEncodeList [rcvKey1, rcvKey2] + tsResult <- + callNode $ + impCmd + <> jsOut ("Cmd.encodeFADD([" <> jsUint8 rcvKey1 <> "," <> jsUint8 rcvKey2 <> "])") + tsResult `shouldBe` expected + + it "encodeFPUT" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodeFPUT()" + tsResult `shouldBe` "FPUT" + + it "encodeFDEL" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodeFDEL()" + tsResult `shouldBe` "FDEL" + + it "encodeFGET" $ do + let expected = "FGET " <> smpEncode dhKey + tsResult <- + callNode $ + impCmd <> jsOut ("Cmd.encodeFGET(" <> jsUint8 dhKey <> ")") + tsResult `shouldBe` expected + + it "encodeFACK" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodeFACK()" + tsResult `shouldBe` "FACK" + + it "encodePING" $ do + tsResult <- callNode $ impCmd <> jsOut "Cmd.encodePING()" + tsResult `shouldBe` "PING" + + describe "decode" $ do + it "decodeResponse OK" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("OK" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FROk' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse PONG" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("PONG" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRPong' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse ERR AUTH" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("ERR AUTH" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRErr' && r.err.type === 'AUTH' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse ERR CMD SYNTAX" $ do + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ("ERR CMD SYNTAX" :: B.ByteString) + <> ");" + <> jsOut "new Uint8Array([r.type === 'FRErr' && r.err.type === 'CMD' && r.err.cmdErr === 'SYNTAX' ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "decodeResponse SIDS" $ do + let senderId = B.pack [1 .. 24] + rId1 = B.pack [25 .. 
48] + rId2 = B.pack [49 .. 72] + sidsBytes = "SIDS " <> smpEncode senderId <> smpEncodeList [rId1, rId2] + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 sidsBytes + <> ");" + <> "if (r.type !== 'FRSndIds') throw new Error('wrong type');" + <> jsOut "E.concatBytes(r.senderId, ...r.recipientIds)" + tsResult `shouldBe` (senderId <> rId1 <> rId2) + + it "decodeResponse RIDS" $ do + let rId1 = B.pack [1 .. 16] + rId2 = B.pack [17 .. 32] + ridsBytes = "RIDS " <> smpEncodeList [rId1, rId2] + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 ridsBytes + <> ");" + <> "if (r.type !== 'FRRcvIds') throw new Error('wrong type');" + <> jsOut "E.concatBytes(...r.recipientIds)" + tsResult `shouldBe` (rId1 <> rId2) + + it "decodeResponse FILE" $ do + let rawPub = B.pack [1 .. 32] + x25519Der = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + derKey = x25519Der <> rawPub + nonce = B.pack [201 .. 224] + fileBytes = "FILE " <> smpEncode derKey <> nonce + tsResult <- + callNode $ + impCmd + <> "const r = Cmd.decodeResponse(" + <> jsUint8 fileBytes + <> ");" + <> "if (r.type !== 'FRFile') throw new Error('wrong type: ' + r.type);" + <> jsOut "E.concatBytes(r.rcvDhKey, r.nonce)" + tsResult `shouldBe` (rawPub <> nonce) + +-- ── protocol/transmission ────────────────────────────────────────── + +tsTransmissionTests :: Spec +tsTransmissionTests = describe "protocol/transmission" $ do + describe "blockPad / blockUnpad" $ do + it "blockPad matches C.pad" $ do + let msg = "hello pad test" :: B.ByteString + blockSize = 256 :: Int + hsPadded = either (error . show) id $ C.pad msg blockSize + tsPadded <- + callNode $ + impTx <> jsOut ("Tx.blockPad(" <> jsUint8 msg <> ", " <> show blockSize <> ")") + tsPadded `shouldBe` hsPadded + + it "Haskell C.pad -> TS blockUnpad" $ do + let msg = "cross-language unpad" :: B.ByteString + blockSize = 128 :: Int + hsPadded = either (error . show) id $ C.pad msg blockSize + tsResult <- + callNode $ + impTx <> jsOut ("Tx.blockUnpad(" <> jsUint8 hsPadded <> ")") + tsResult `shouldBe` msg + + it "TS blockPad -> Haskell C.unPad" $ do + let msg = "ts-to-haskell pad" :: B.ByteString + blockSize = 128 :: Int + tsPadded <- + callNode $ + impTx <> jsOut ("Tx.blockPad(" <> jsUint8 msg <> ", " <> show blockSize <> ")") + let hsResult = either (error . show) id $ C.unPad tsPadded + hsResult `shouldBe` msg + + describe "transmission encoding" $ do + it "encodeTransmission unsigned (PING)" $ do + let sessionId = B.pack [201 .. 232] + corrId = "abc" :: B.ByteString + entityId = "" :: B.ByteString + cmdBytes = "PING" :: B.ByteString + -- implySessId = False: sessionId on wire + tWire = smpEncode sessionId <> smpEncode corrId <> smpEncode entityId <> cmdBytes + authenticator = smpEncode ("" :: B.ByteString) + encoded = authenticator <> tWire + batch = B.singleton 1 <> smpEncode (Large encoded) + expected = either (error . show) id $ C.pad batch 16384 + tsResult <- + callNode $ + impTx + <> jsOut + ( "Tx.encodeTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 corrId + <> ", " + <> jsUint8 entityId + <> ", " + <> jsUint8 cmdBytes + <> ")" + ) + tsResult `shouldBe` expected + + it "encodeAuthTransmission signed" $ do + let seed = B.pack [1 .. 32] + sk = throwCryptoError $ Ed25519.secretKey seed + pk = Ed25519.toPublic sk + sessionId = B.pack [101 .. 132] + corrId = "xyz" :: B.ByteString + entityId = B.pack [1 .. 
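+        -- Framing asserted in this test: the Ed25519 signature covers
+        -- smpEncode sessionId <> smpEncode corrId <> smpEncode entityId <> cmd;
+        -- the wire block is smpEncode signature <> the signed bytes, wrapped
+        -- as a one-transmission batch (count byte 1 <> Large) and padded to
+        -- the 16384-byte XFTP block size.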
24] + cmdBytes = "FPUT" :: B.ByteString + tInner = smpEncode corrId <> smpEncode entityId <> cmdBytes + tForAuth = smpEncode sessionId <> tInner + sig = Ed25519.sign sk pk tForAuth + rawSig = BA.convert sig :: B.ByteString + authenticator = smpEncode rawSig + -- implySessId = False: tToSend = tForAuth (sessionId on wire) + encoded = authenticator <> tForAuth + batch = B.singleton 1 <> smpEncode (Large encoded) + expected = either (error . show) id $ C.pad batch 16384 + tsResult <- + callNode $ + impTx + <> "const kp = K.ed25519KeyPairFromSeed(" + <> jsUint8 seed + <> ");" + <> jsOut + ( "Tx.encodeAuthTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 corrId + <> ", " + <> jsUint8 entityId + <> ", " + <> jsUint8 cmdBytes + <> ", kp.privateKey)" + ) + tsResult `shouldBe` expected + + it "decodeTransmission" $ do + let sessionId = B.pack [201 .. 232] + corrId = "r01" :: B.ByteString + entityId = B.pack [1 .. 16] + cmdBytes = "OK" :: B.ByteString + -- implySessId = False: sessionId on wire + tWire = smpEncode sessionId <> smpEncode corrId <> smpEncode entityId <> cmdBytes + authenticator = smpEncode ("" :: B.ByteString) + encoded = authenticator <> tWire + batch = B.singleton 1 <> smpEncode (Large encoded) + block = either (error . show) id $ C.pad batch 256 + tsResult <- + callNode $ + impTx + <> "const t = Tx.decodeTransmission(" + <> jsUint8 sessionId + <> ", " + <> jsUint8 block + <> ");" + <> jsOut "E.concatBytes(t.corrId, t.entityId, t.command)" + tsResult `shouldBe` (corrId <> entityId <> cmdBytes) + +-- ── protocol/handshake ──────────────────────────────────────────── + +tsHandshakeTests :: Spec +tsHandshakeTests = describe "protocol/handshake" $ do + describe "version range" $ do + it "encodeVersionRange" $ do + let expected = smpEncode (1 :: Word16) <> smpEncode (3 :: Word16) + tsResult <- + callNode $ + impHs + <> jsOut "Hs.encodeVersionRange({minVersion: 1, maxVersion: 3})" + tsResult `shouldBe` expected + + it "decodeVersionRange" $ do + let vrBytes = smpEncode (2 :: Word16) <> smpEncode (5 :: Word16) + tsResult <- + callNode $ + impHs + <> "const d = new E.Decoder(" + <> jsUint8 vrBytes + <> ");" + <> "const vr = Hs.decodeVersionRange(d);" + <> jsOut "E.concatBytes(E.encodeWord16(vr.minVersion), E.encodeWord16(vr.maxVersion))" + tsResult `shouldBe` vrBytes + + it "compatibleVRange (compatible)" $ do + -- intersection of [1,3] and [2,5] = [2,3] + let expected = smpEncode (2 :: Word16) <> smpEncode (3 :: Word16) + tsResult <- + callNode $ + impHs + <> "const r = Hs.compatibleVRange({minVersion:1,maxVersion:3},{minVersion:2,maxVersion:5});" + <> "if (!r) throw new Error('expected compatible');" + <> jsOut "Hs.encodeVersionRange(r)" + tsResult `shouldBe` expected + + it "compatibleVRange (incompatible)" $ do + tsResult <- + callNode $ + impHs + <> "const r = Hs.compatibleVRange({minVersion:1,maxVersion:2},{minVersion:3,maxVersion:5});" + <> jsOut "new Uint8Array([r === null ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + describe "client handshake" $ do + it "encodeClientHandshake" $ do + let kh = B.pack [1 .. 32] + body = smpEncode (3 :: Word16) <> smpEncode kh + expected = either (error . 
show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> jsOut ("Hs.encodeClientHandshake({xftpVersion:3,keyHash:" <> jsUint8 kh <> "})") + tsResult `shouldBe` expected + + describe "client hello" $ do + it "encodeClientHello (Nothing)" $ do + let expected = smpEncode (XFTPClientHello {webChallenge = Nothing}) + tsResult <- + callNode $ + impHs + <> jsOut "Hs.encodeClientHello({webChallenge: null})" + tsResult `shouldBe` expected + + it "encodeClientHello (Just challenge)" $ do + let challenge = B.pack [1 .. 32] + expected = smpEncode (XFTPClientHello {webChallenge = Just challenge}) + tsResult <- + callNode $ + impHs + <> jsOut ("Hs.encodeClientHello({webChallenge:" <> jsUint8 challenge <> "})") + tsResult `shouldBe` expected + + describe "server handshake" $ do + it "decodeServerHandshake" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] -- 100 bytes + cert2 = B.pack [201 .. 232] -- 32 bytes + signedKeyBytes = B.pack [1 .. 120] + -- Encode server handshake body matching Haskell wire format: + -- smpEncode (versionRange, sessionId, certChainPubKey) + -- where certChainPubKey = (NonEmpty Large certChain, Large signedKey) + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + serverBlock = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut + ( "E.concatBytes(" + <> "E.encodeWord16(hs.xftpVersionRange.minVersion)," + <> "E.encodeWord16(hs.xftpVersionRange.maxVersion)," + <> "hs.sessionId," + <> "...hs.certChainDer," + <> "hs.signedKeyDer)" + ) + -- Expected: vmin(2) + vmax(2) + sessId(32) + cert1(100) + cert2(32) + signedKey(120) = 288 bytes + tsResult + `shouldBe` ( smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> sessId + <> cert1 + <> cert2 + <> signedKeyBytes + ) + + it "decodeServerHandshake with webIdentityProof" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 232] + signedKeyBytes = B.pack [1 .. 120] + sigBytes = B.pack [1 .. 64] + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + <> smpEncode sigBytes + serverBlock = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut "hs.webIdentityProof || new Uint8Array(0)" + tsResult `shouldBe` sigBytes + + it "decodeServerHandshake without webIdentityProof" $ do + let sessId = B.pack [1 .. 32] + cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 232] + signedKeyBytes = B.pack [1 .. 120] + body = + smpEncode (1 :: Word16) + <> smpEncode (3 :: Word16) + <> smpEncode sessId + <> smpEncode (NE.fromList [Large cert1, Large cert2]) + <> smpEncode (Large signedKeyBytes) + <> smpEncode ("" :: B.ByteString) + serverBlock = either (error . show) id $ C.pad body 16384 + tsResult <- + callNode $ + impHs + <> "const hs = Hs.decodeServerHandshake(" + <> jsUint8 serverBlock + <> ");" + <> jsOut "new Uint8Array([hs.webIdentityProof === null ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + describe "certificate utilities" $ do + it "caFingerprint" $ do + let cert1 = B.pack [101 .. 200] + cert2 = B.pack [201 .. 
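+        -- caFingerprint is pinned to C.sha256Hash of the second certificate
+        -- in the chain for 2- and 3-cert chains alike; chainIdCaCerts below
+        -- fixes the (leaf, id, ca) positions for each supported chain length.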
232] + expected = C.sha256Hash cert2 + tsResult <- + callNode $ + impHs + <> "const chain = [" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "];" + <> jsOut "Hs.caFingerprint(chain)" + tsResult `shouldBe` expected + + it "caFingerprint 3 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + expected = C.sha256Hash cert2 + tsResult <- + callNode $ + impHs + <> "const chain = [" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "];" + <> jsOut "Hs.caFingerprint(chain)" + tsResult `shouldBe` expected + + it "chainIdCaCerts 2 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert2) + + it "chainIdCaCerts 3 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert3) + + it "chainIdCaCerts 4 certs" $ do + let cert1 = B.pack [1 .. 10] + cert2 = B.pack [11 .. 20] + cert3 = B.pack [21 .. 30] + cert4 = B.pack [31 .. 40] + tsResult <- + callNode $ + impHs + <> "const cc = Hs.chainIdCaCerts([" + <> jsUint8 cert1 + <> "," + <> jsUint8 cert2 + <> "," + <> jsUint8 cert3 + <> "," + <> jsUint8 cert4 + <> "]);" + <> "if (cc.type !== 'valid') throw new Error('expected valid');" + <> jsOut "E.concatBytes(cc.leafCert, cc.idCert, cc.caCert)" + tsResult `shouldBe` (cert1 <> cert2 <> cert4) + + describe "SignedExact parsing" $ do + it "extractSignedKey" $ do + -- Generate signing key (Ed25519) + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + -- Generate DH key (X25519) + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + -- SubjectPublicKeyInfo DER for X25519 (44 bytes) + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + -- Sign the SPKI with Ed25519 + sig = Ed25519.sign signSk signPk spkiDer + sigRaw = BA.convert sig :: B.ByteString + -- AlgorithmIdentifier for Ed25519 (7 bytes) + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + -- BIT STRING wrapper (3 + 64 = 67 bytes) + bitString = B.pack [0x03, 0x41, 0x00] <> sigRaw + -- Outer SEQUENCE: content = 44 + 7 + 67 = 118 = 0x76 + content = spkiDer <> algId <> bitString + signedExactDer = B.pack [0x30, 0x76] <> content + tsResult <- + callNode $ + impHs + <> "const sk = Hs.extractSignedKey(" + <> jsUint8 signedExactDer + <> ");" + <> jsOut "E.concatBytes(sk.dhKey, sk.signature)" + -- dhKey (32) + signature (64) = 96 bytes + tsResult `shouldBe` (dhPkRaw <> sigRaw) + + it "extractSignedKey signature verifies" $ do + let signSeed = B.pack [1 .. 
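+        -- The hand-built SignedExact DER repeats a fixed shape across these
+        -- tests; a sketch of it as a helper (hypothetical name, for clarity):
+        --   mkSignedKeyDer sk pk spkiDer =
+        --     let sigRaw    = BA.convert (Ed25519.sign sk pk spkiDer)
+        --         algId     = B.pack [0x30,0x05,0x06,0x03,0x2b,0x65,0x70] -- Ed25519 AlgorithmIdentifier
+        --         bitString = B.pack [0x03,0x41,0x00] <> sigRaw           -- BIT STRING, 64-byte signature
+        --      in B.pack [0x30,0x76] <> spkiDer <> algId <> bitString     -- outer SEQUENCE, length 118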
32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + sig = Ed25519.sign signSk signPk spkiDer + sigRaw = BA.convert sig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> sigRaw + content = spkiDer <> algId <> bitString + signedExactDer = B.pack [0x30, 0x76] <> content + tsResult <- + callNode $ + impHs + <> "const sk = Hs.extractSignedKey(" + <> jsUint8 signedExactDer + <> ");" + <> "const ok = K.verify(" + <> jsUint8 signPkRaw + <> ", sk.signature, sk.objectDer);" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + +-- ── crypto/identity ────────────────────────────────────────────── + +-- Construct a minimal X.509 certificate DER with an Ed25519 public key. +-- Structurally valid for DER navigation but not a real certificate. +mkFakeCertDer :: B.ByteString -> B.ByteString +mkFakeCertDer pubKey32 = + let spki = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] <> pubKey32 + tbsContents = + B.concat + [ B.pack [0xa0, 0x03, 0x02, 0x01, 0x02], + B.pack [0x02, 0x01, 0x01], + B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70], + B.pack [0x30, 0x00], + B.pack [0x30, 0x00], + B.pack [0x30, 0x00], + spki + ] + tbs = B.pack [0x30, fromIntegral $ B.length tbsContents] <> tbsContents + certContents = + B.concat + [ tbs, + B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70], + B.pack [0x03, 0x41, 0x00] <> B.replicate 64 0 + ] + certLen = B.length certContents + in B.pack [0x30, 0x81, fromIntegral certLen] <> certContents + +tsIdentityTests :: Spec +tsIdentityTests = describe "crypto/identity" $ do + describe "extractCertPublicKeyInfo" $ do + it "extracts SPKI from X.509 DER" $ do + let pubKey = B.pack [1 .. 32] + certDer = mkFakeCertDer pubKey + expectedSpki = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00] <> pubKey + tsResult <- + callNode $ + impId + <> jsOut ("Id.extractCertPublicKeyInfo(" <> jsUint8 certDer <> ")") + tsResult `shouldBe` expectedSpki + + it "extractCertPublicKeyInfo + decodePubKey returns raw 32-byte key" $ do + let pubKey = B.pack [1 .. 32] + certDer = mkFakeCertDer pubKey + tsResult <- + callNode $ + impId + <> jsOut ("K.decodePubKeyEd25519(Id.extractCertPublicKeyInfo(" <> jsUint8 certDer <> "))") + tsResult `shouldBe` pubKey + + describe "verifyIdentityProof" $ do + it "valid proof returns true" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key SignedExact + dhSeed = B.pack [41 .. 
72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + -- Challenge signature + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "wrong keyHash returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + wrongKeyHash = B.replicate 32 0xff + -- DH key SignedExact + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 wrongKeyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "wrong challenge sig returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key SignedExact + dhSeed = B.pack [41 .. 
72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign signSk signPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 232] + wrongChallenge = B.pack [1 .. 32] + wrongSig = Ed25519.sign signSk signPk (wrongChallenge <> sessionId) + wrongSigRaw = BA.convert wrongSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 wrongSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "wrong DH key sig returns false" $ do + let signSeed = B.pack [1 .. 32] + signSk = throwCryptoError $ Ed25519.secretKey signSeed + signPk = Ed25519.toPublic signSk + signPkRaw = BA.convert signPk :: B.ByteString + leafCertDer = mkFakeCertDer signPkRaw + idCertDer = B.pack [1 .. 50] + keyHash = C.sha256Hash idCertDer + -- DH key signed by a DIFFERENT key + otherSeed = B.pack [51 .. 82] + otherSk = throwCryptoError $ Ed25519.secretKey otherSeed + otherPk = Ed25519.toPublic otherSk + dhSeed = B.pack [41 .. 72] + dhSk = throwCryptoError $ X25519.secretKey dhSeed + dhPk = X25519.toPublic dhSk + dhPkRaw = BA.convert dhPk :: B.ByteString + x25519Prefix = B.pack [0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00] + spkiDer = x25519Prefix <> dhPkRaw + dhSig = Ed25519.sign otherSk otherPk spkiDer + dhSigRaw = BA.convert dhSig :: B.ByteString + algId = B.pack [0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70] + bitString = B.pack [0x03, 0x41, 0x00] <> dhSigRaw + signedKeyDer = B.pack [0x30, 0x76] <> spkiDer <> algId <> bitString + challenge = B.pack [101 .. 132] + sessionId = B.pack [201 .. 232] + challengeSig = Ed25519.sign signSk signPk (challenge <> sessionId) + challengeSigRaw = BA.convert challengeSig :: B.ByteString + tsResult <- + callNode $ + impId + <> "const ok = Id.verifyIdentityProof({" + <> "certChainDer: [" + <> jsUint8 leafCertDer + <> "," + <> jsUint8 idCertDer + <> "]," + <> "signedKeyDer: " + <> jsUint8 signedKeyDer + <> "," + <> "sigBytes: " + <> jsUint8 challengeSigRaw + <> "," + <> "challenge: " + <> jsUint8 challenge + <> "," + <> "sessionId: " + <> jsUint8 sessionId + <> "," + <> "keyHash: " + <> jsUint8 keyHash + <> "});" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + +-- ── protocol/description ────────────────────────────────────────── + +tsDescriptionTests :: Spec +tsDescriptionTests = describe "protocol/description" $ do + describe "base64url" $ do + it "encode matches Haskell strEncode" $ do + let bs = B.pack [0 .. 31] + tsResult <- + callNode $ + impDesc + <> jsOut ("new TextEncoder().encode(Desc.base64urlEncode(" <> jsUint8 bs <> "))") + tsResult `shouldBe` strEncode bs + + it "decode recovers original" $ do + let bs = B.pack [0 .. 
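+        -- base64urlEncode must agree byte-for-byte with Haskell's strEncode
+        -- for ByteString, which uses the URL-safe alphabet ('-' and '_', as
+        -- seen in the server fingerprints in the address tests below); the
+        -- fixture round-trips depend on this exact agreement.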
31] + encoded = strEncode bs + tsResult <- + callNode $ + impDesc + <> "const s = new TextDecoder().decode(" + <> jsUint8 encoded + <> ");" + <> jsOut "Desc.base64urlDecode(s)" + tsResult `shouldBe` bs + + it "round-trip 256 bytes" $ do + let bs = B.pack [0 .. 255] + tsResult <- + callNode $ + impDesc + <> "const data = " + <> jsUint8 bs + <> ";" + <> "const encoded = Desc.base64urlEncode(data);" + <> jsOut "Desc.base64urlDecode(encoded)" + tsResult `shouldBe` bs + + describe "FileSize" $ do + it "encodeFileSize" $ do + let sizes = [500, 1024, 2048, 1048576, 8388608, 1073741824, 27262976 :: Int64] + expected = B.intercalate "," $ map (strEncode . FileSize) sizes + tsResult <- + callNode $ + impDesc + <> "const sizes = [500, 1024, 2048, 1048576, 8388608, 1073741824, 27262976];" + <> jsOut "new TextEncoder().encode(sizes.map(Desc.encodeFileSize).join(','))" + tsResult `shouldBe` expected + + it "decodeFileSize" $ do + tsResult <- + callNode $ + impDesc + <> "const strs = ['500','1kb','2kb','1mb','8mb','1gb'];" + <> jsOut "new TextEncoder().encode(strs.map(s => String(Desc.decodeFileSize(s))).join(','))" + tsResult `shouldBe` "500,1024,2048,1048576,8388608,1073741824" + + describe "FileDescription" $ do + it "fixture YAML round-trip" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const reEncoded = Desc.encodeFileDescription(fd);" + <> jsOut "new TextEncoder().encode(reEncoded)" + tsResult `shouldBe` fixture + + it "fixture parsed structure" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const r = [" + <> "fd.party," + <> "String(fd.size)," + <> "String(fd.chunkSize)," + <> "String(fd.chunks.length)," + <> "String(fd.chunks[0].replicas.length)," + <> "String(fd.chunks[3].chunkSize)," + <> "fd.redirect === null ? 
'null' : 'redirect'" + <> "].join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "recipient,27262976,8388608,4,2,2097152,null" + + it "encode with redirect round-trips" $ do + tsResult <- + callNode $ + impDesc + <> "const fd = {" + <> " party: 'sender'," + <> " size: 1024," + <> " digest: new Uint8Array([1,2,3])," + <> " key: new Uint8Array(32)," + <> " nonce: new Uint8Array(24)," + <> " chunkSize: 1024," + <> " chunks: [{chunkNo: 1, chunkSize: 1024, digest: new Uint8Array([4,5,6])," + <> " replicas: [{server: 'xftp://abc=@example.com', replicaId: new Uint8Array([7,8,9])," + <> " replicaKey: new Uint8Array([10,11,12])}]}]," + <> " redirect: {size: 512, digest: new Uint8Array([13,14,15])}" + <> "};" + <> "const yaml = Desc.encodeFileDescription(fd);" + <> "const fd2 = Desc.decodeFileDescription(yaml);" + <> "const r = [" + <> "fd2.party," + <> "String(fd2.redirect !== null)," + <> "String(fd2.redirect?.size)," + <> "Desc.base64urlEncode(fd2.redirect?.digest || new Uint8Array())" + <> "].join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "sender,true,512,DQ4P" + + it "fdSeparator" $ do + tsResult <- + callNode $ + impDesc + <> jsOut "new TextEncoder().encode(Desc.fdSeparator)" + tsResult `shouldBe` "################################\n" + + describe "validation" $ do + it "valid description" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r === null ? 'ok' : r)" + tsResult `shouldBe` "ok" + + it "non-sequential chunks" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "fd.chunks[1].chunkNo = 5;" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r || 'ok')" + tsResult `shouldBe` "chunk numbers are not sequential" + + it "mismatched size" $ do + fixture <- B.readFile "tests/fixtures/file_description.yaml" + tsResult <- + callNode $ + impDesc + <> "const yaml = new TextDecoder().decode(" + <> jsUint8 fixture + <> ");" + <> "const fd = Desc.decodeFileDescription(yaml);" + <> "fd.size = 999;" + <> "const r = Desc.validateFileDescription(fd);" + <> jsOut "new TextEncoder().encode(r || 'ok')" + tsResult `shouldBe` "chunks total size is different than file size" + +-- ── protocol/chunks ─────────────────────────────────────────────── + +tsChunkTests :: Spec +tsChunkTests = describe "protocol/chunks" $ do + describe "prepareChunkSizes" $ do + it "matches Haskell for various sizes" $ do + let sizes = [100, 65536, 130000, 200000, 500000, 800000, 5000000, 27262976 :: Int64] + hsResults = map prepareChunkSizes sizes + expected = B.intercalate "|" $ map (\cs -> B.intercalate "," $ map (strEncode . 
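+        -- Rather than re-asserting the splitting algorithm, the TS port is
+        -- compared against the Haskell prepareChunkSizes output across
+        -- representative sizes; serverChunkSizes (64kb/256kb/1mb/4mb, per
+        -- the constants test below) bounds the available chunk sizes.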
FileSize) cs) hsResults + tsResult <- + callNode $ + impChk + <> "const sizes = [100, 65536, 130000, 200000, 500000, 800000, 5000000, 27262976];" + <> "const results = sizes.map(s => Chk.prepareChunkSizes(s).map(Desc.encodeFileSize).join(','));" + <> jsOut "new TextEncoder().encode(results.join('|'))" + tsResult `shouldBe` expected + + it "zero size" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.prepareChunkSizes(0).join(','))" + tsResult `shouldBe` "" + + describe "singleChunkSize" $ do + it "finds smallest fitting chunk size" $ do + tsResult <- + callNode $ + impChk + <> "const sizes = [100, 65536, 262144, 300000, 1048576, 4194304, 5000000];" + <> "const results = sizes.map(s => {" + <> " const r = Chk.singleChunkSize(s);" + <> " return r === null ? 'null' : Desc.encodeFileSize(r);" + <> "});" + <> jsOut "new TextEncoder().encode(results.join(','))" + tsResult `shouldBe` "64kb,64kb,256kb,1mb,1mb,4mb,null" + + describe "prepareChunkSpecs" $ do + it "generates correct offsets" $ do + tsResult <- + callNode $ + impChk + <> "const specs = Chk.prepareChunkSpecs([4194304, 4194304, 1048576]);" + <> "const r = specs.map(s => s.chunkOffset + ':' + s.chunkSize).join(',');" + <> jsOut "new TextEncoder().encode(r)" + tsResult `shouldBe` "0:4194304,4194304:4194304,8388608:1048576" + + describe "getChunkDigest" $ do + it "matches Haskell sha256Hash" $ do + let chunk = B.pack [0 .. 63] + expected = C.sha256Hash chunk + tsResult <- + callNode $ + impChk + <> jsOut ("Chk.getChunkDigest(" <> jsUint8 chunk <> ")") + tsResult `shouldBe` expected + + describe "constants" $ do + it "serverChunkSizes" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.serverChunkSizes.map(Desc.encodeFileSize).join(','))" + tsResult `shouldBe` "64kb,256kb,1mb,4mb" + + it "fileSizeLen and authTagSize" $ do + tsResult <- + callNode $ + impChk + <> jsOut "new TextEncoder().encode(Chk.fileSizeLen + ',' + Chk.authTagSize)" + tsResult `shouldBe` "8,16" + +-- ── protocol/client ───────────────────────────────────────────── + +tsClientTests :: Spec +tsClientTests = describe "protocol/client" $ do + -- Fixed X25519 key pairs for deterministic tests + let privARaw = B.pack [1 .. 32] + privA = throwCryptoError $ X25519.secretKey privARaw + pubA = X25519.toPublic privA + pubARaw = BA.convert pubA :: B.ByteString + privBRaw = B.pack [33 .. 64] + privB = throwCryptoError $ X25519.secretKey privBRaw + pubB = X25519.toPublic privB + pubBRaw = BA.convert pubB :: B.ByteString + nonce24 = B.pack [0 .. 
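+  -- cbAuthenticate yields an 80-byte authenticator (pinned below), i.e. a
+  -- 16-byte auth tag plus a 64-byte encrypted payload, assuming the
+  -- crypto_box layout that the CbAuthenticator type suggests.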
23] + + describe "cbAuthenticate" $ do + it "matches Haskell output" $ do + let msg = "hello world authenticator test" + C.CbAuthenticator expected = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const auth = Cli.cbAuthenticate(" + <> jsUint8 pubARaw + <> "," + <> jsUint8 privBRaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "auth" + tsResult `shouldBe` expected + + it "is 80 bytes" $ do + let msg = "size test" + C.CbAuthenticator expected = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + B.length expected `shouldBe` 80 + + describe "cbVerify" $ do + it "validates Haskell authenticator" $ do + let msg = "test message for verify" + C.CbAuthenticator authBytes_ = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const valid = Cli.cbVerify(" + <> jsUint8 pubBRaw + <> "," + <> jsUint8 privARaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 authBytes_ + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "new Uint8Array([valid ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "rejects wrong message" $ do + let msg = "correct message" + wrongMsg = "wrong message" + C.CbAuthenticator authBytes_ = + C.cbAuthenticate + (C.PublicKeyX25519 pubA) + (C.PrivateKeyX25519 privB) + (C.cbNonce nonce24) + msg + tsResult <- + callNode $ + impCli + <> "const valid = Cli.cbVerify(" + <> jsUint8 pubBRaw + <> "," + <> jsUint8 privARaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 authBytes_ + <> "," + <> jsUint8 wrongMsg + <> ");" + <> jsOut "new Uint8Array([valid ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + it "round-trip: TS authenticate, Haskell verify" $ do + let msg = "round trip test" + tsAuth <- + callNode $ + impCli + <> "const auth = Cli.cbAuthenticate(" + <> jsUint8 pubARaw + <> "," + <> jsUint8 privBRaw + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 msg + <> ");" + <> jsOut "auth" + let hsValid = + C.cbVerify + (C.PublicKeyX25519 pubB) + (C.PrivateKeyX25519 privA) + (C.cbNonce nonce24) + (C.CbAuthenticator tsAuth) + msg + hsValid `shouldBe` True + + describe "transport chunk encryption" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + + it "encryptTransportChunk matches Haskell" $ do + let plaintext = B.pack [100 .. 199] + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 plaintext + tag = BA.convert $ LC.sbAuth state1 :: B.ByteString + expected = cipher <> tag + tsResult <- + callNode $ + impCli + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 plaintext + <> ");" + <> jsOut "enc" + tsResult `shouldBe` expected + + it "decryptTransportChunk decrypts Haskell-encrypted data" $ do + let plaintext = B.pack ([200 .. 255] <> [0 .. 99]) + state0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 plaintext + tag = BA.convert $ LC.sbAuth state1 :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impCli + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> ");" + <> "if (!r.valid) throw new Error('invalid');" + <> jsOut "r.content" + tsResult `shouldBe` plaintext + + it "round-trip encrypt then decrypt" $ do + let plaintext = B.pack [42, 42, 42, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + tsResult <- + callNode $ + impCli + <> "const plain = " + <> jsUint8 plaintext + <> ";" + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",plain);" + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",enc);" + <> "if (!r.valid) throw new Error('invalid');" + <> jsOut "r.content" + tsResult `shouldBe` plaintext + + it "rejects tampered ciphertext" $ do + let plaintext = B.pack [10 .. 40] + tsResult <- + callNode $ + impCli + <> "const enc = Cli.encryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 plaintext + <> ");" + <> "enc[0] ^= 0xff;" + <> "const r = Cli.decryptTransportChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> ",enc);" + <> jsOut "new Uint8Array([r.valid ? 1 : 0])" + tsResult `shouldBe` B.pack [0] + + describe "constants" $ do + it "cbAuthenticatorSize" $ do + tsResult <- + callNode $ + impCli <> jsOut "new TextEncoder().encode(String(Cli.cbAuthenticatorSize))" + tsResult `shouldBe` "80" + +-- ── download (integration) ────────────────────────────────────────── + +tsDownloadTests :: Spec +tsDownloadTests = describe "download" $ do + -- Fixed X25519 key pairs (same as client tests) + let privARaw = B.pack [1 .. 32] + privA = throwCryptoError $ X25519.secretKey privARaw + pubA = X25519.toPublic privA + pubARaw = BA.convert pubA :: B.ByteString + privBRaw = B.pack [33 .. 64] + privB = throwCryptoError $ X25519.secretKey privBRaw + pubB = X25519.toPublic privB + pubBRaw = BA.convert pubB :: B.ByteString + nonce24 = B.pack [0 .. 23] + -- File-level key/nonce (different from transport) + fileKey32 = B.pack [1 .. 32] + fileNonce24 = B.pack [1 .. 24] + fileCbNonce = C.cbNonce fileNonce24 + fileSbKey = C.unsafeSbKey fileKey32 + + describe "processFileResponse" $ do + it "derives DH secret matching Haskell" $ do + -- Simulate: client has privA, server sends pubB + let hsDhSecret = C.dh' (C.PublicKeyX25519 pubB) (C.PrivateKeyX25519 privA) + hsDhBytes = case hsDhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + tsDhSecret <- + callNode $ + impDl + <> "const dh = Dl.processFileResponse(" + <> jsUint8 privARaw + <> "," + <> jsUint8 pubBRaw + <> ");" + <> jsOut "dh" + tsDhSecret `shouldBe` hsDhBytes + + describe "decryptReceivedChunk" $ do + it "transport decrypt with digest verification" $ do + -- Haskell: transport-encrypt a chunk + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [50 .. 149] + chunkDigest = C.sha256Hash chunkData + state0 = either (error . 
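+        -- Transport chunks are secretbox-encrypted with the per-download DH
+        -- secret and server nonce, with the auth tag appended; on receive
+        -- the tag is verified first, then the optional SHA-256 chunk digest
+        -- (null skips the digest check, per the test below).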
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "const r = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> "," + <> jsUint8 chunkDigest + <> ");" + <> jsOut "r" + tsResult `shouldBe` chunkData + + it "rejects wrong digest" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [50 .. 149] + wrongDigest = B.replicate 32 0xff + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "let ok = false; try { Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> "," + <> jsUint8 wrongDigest + <> "); } catch(e) { ok = e.message.includes('digest'); }" + <> jsOut "new Uint8Array([ok ? 1 : 0])" + tsResult `shouldBe` B.pack [1] + + it "allows null digest (skip verification)" $ do + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + chunkData = B.pack [10 .. 50] + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + tsResult <- + callNode $ + impDl + <> "const r = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 encData + <> ",null);" + <> jsOut "r" + tsResult `shouldBe` chunkData + + describe "full pipeline" $ do + it "Haskell file-encrypt + transport-encrypt -> TS transport-decrypt + file-decrypt" $ do + -- Step 1: file-level encryption (matches Haskell encryptFile) + let source = "Integration test: full download pipeline!" :: B.ByteString + hdr = FileHeader "pipeline.txt" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- Step 2: transport-level encryption (simulates server sending chunk) + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecretBytes = case dhSecret of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + ts0 = either (error . 
show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (transportCipher, ts1) = LC.sbEncryptChunk ts0 fileEncrypted + transportTag = BA.convert (LC.sbAuth ts1) :: B.ByteString + transportEncData = transportCipher <> transportTag + -- Step 3: TS decrypts transport, then file-level + tsResult <- + callNode $ + impDl + <> "const chunk = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecretBytes + <> "," + <> jsUint8 nonce24 + <> "," + <> jsUint8 transportEncData + <> ",null);" + <> "const r = F.decryptChunks(" + <> show encSize + <> "n,[chunk]," + <> jsUint8 fileKey32 + <> "," + <> jsUint8 fileNonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + it "multi-chunk file: Haskell encrypt -> TS decrypt" $ do + -- File content that spans two chunks when file-encrypted + let source = B.pack (take 200 $ cycle [0 .. 255]) + hdr = FileHeader "multi.bin" Nothing + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 512 :: Int64 + sb = either (error . show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- Split file-encrypted data into two "chunks" and transport-encrypt each + let splitPt = B.length fileEncrypted `div` 2 + fileChunk1 = B.take splitPt fileEncrypted + fileChunk2 = B.drop splitPt fileEncrypted + -- Transport encrypt chunk 1 (with separate DH / nonce per chunk) + dhSecret1 = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + dhSecret1Bytes = case dhSecret1 of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + nonce1 = nonce24 + t1s0 = either (error . show) id $ LC.cbInit dhSecret1 (C.cbNonce nonce1) + (t1cipher, t1s1) = LC.sbEncryptChunk t1s0 fileChunk1 + t1tag = BA.convert (LC.sbAuth t1s1) :: B.ByteString + transportEnc1 = t1cipher <> t1tag + -- Transport encrypt chunk 2 (different nonce) + nonce2 = B.pack [24 .. 47] + dhSecret2 = C.dh' (C.PublicKeyX25519 pubB) (C.PrivateKeyX25519 privA) + dhSecret2Bytes = case dhSecret2 of C.DhSecretX25519 k -> BA.convert k :: B.ByteString + t2s0 = either (error . 
show) id $ LC.cbInit dhSecret2 (C.cbNonce nonce2) + (t2cipher, t2s1) = LC.sbEncryptChunk t2s0 fileChunk2 + t2tag = BA.convert (LC.sbAuth t2s1) :: B.ByteString + transportEnc2 = t2cipher <> t2tag + -- TS: transport-decrypt each chunk, then file-level decrypt the concatenation + tsResult <- + callNode $ + impDl + <> "const c1 = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecret1Bytes + <> "," + <> jsUint8 nonce1 + <> "," + <> jsUint8 transportEnc1 + <> ",null);" + <> "const c2 = Dl.decryptReceivedChunk(" + <> jsUint8 dhSecret2Bytes + <> "," + <> jsUint8 nonce2 + <> "," + <> jsUint8 transportEnc2 + <> ",null);" + <> "const r = F.decryptChunks(" + <> show encSize + <> "n,[c1,c2]," + <> jsUint8 fileKey32 + <> "," + <> jsUint8 fileNonce24 + <> ");" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + + describe "FGET + FRFile round-trip" $ do + it "encode FGET -> decode FRFile -> process -> transport decrypt" $ do + -- Client side: generate FGET command + let dhSecret = C.dh' (C.PublicKeyX25519 pubA) (C.PrivateKeyX25519 privB) + chunkData = "FGET round-trip test data" :: B.ByteString + state0 = either (error . show) id $ LC.cbInit dhSecret (C.cbNonce nonce24) + (cipher, state1) = LC.sbEncryptChunk state0 chunkData + tag = BA.convert (LC.sbAuth state1) :: B.ByteString + encData = cipher <> tag + -- Simulate server response: FILE + -- Server sends pubA (client has privB to do DH) + serverPubDer = C.encodePubKey (C.PublicKeyX25519 pubA) + fileResponseBytes = "FILE " <> smpEncode serverPubDer <> nonce24 + -- TS: parse FRFile response, derive DH secret, decrypt transport chunk + tsResult <- + callNode $ + impDl + <> "const resp = Cmd.decodeResponse(" + <> jsUint8 fileResponseBytes + <> ");" + <> "if (resp.type !== 'FRFile') throw new Error('expected FRFile');" + <> "const dhSecret = Dl.processFileResponse(" + <> jsUint8 privBRaw + <> ",resp.rcvDhKey);" + <> "const r = Dl.decryptReceivedChunk(dhSecret," + <> "resp.nonce," + <> jsUint8 encData + <> ",null);" + <> jsOut "r" + tsResult `shouldBe` chunkData + + describe "processDownloadedFile" $ do + it "decrypts file from transport-decrypted chunks" $ do + let source = "processDownloadedFile test" :: B.ByteString + hdr = FileHeader "download.txt" (Just "v1") + fileHdr = smpEncode hdr + fileSize' = fromIntegral (B.length fileHdr + B.length source) :: Int64 + encSize = 256 :: Int64 + sb = either (error . 
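+        -- Same hand-rolled file encryption as the pipeline tests above:
+        -- smpEncode size (8 bytes) <> header <> content <> '#' padding,
+        -- secretbox-encrypted incrementally, then the 16-byte auth tag;
+        -- hence padLen = encSize - authTagSize - fileSizeLen - fileSize'.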
show) id $ LC.sbInit fileSbKey fileCbNonce + lenStr = smpEncode fileSize' + (hdrEnc, sb1) = LC.sbEncryptChunk sb (lenStr <> fileHdr) + (srcEnc, sb2) = LC.sbEncryptChunk sb1 source + padLen = encSize - 16 - fileSize' - 8 + padding = B.replicate (fromIntegral padLen) 0x23 + (padEnc, sb3) = LC.sbEncryptChunk sb2 padding + fileTag = BA.convert (LC.sbAuth sb3) :: B.ByteString + fileEncrypted = B.concat [hdrEnc, srcEnc, padEnc, fileTag] + -- TS: call processDownloadedFile with a minimal FileDescription-like object + tsResult <- + callNode $ + impDl + <> "const fd = {size: " + <> show encSize + <> "," + <> "key: " + <> jsUint8 fileKey32 + <> "," + <> "nonce: " + <> jsUint8 fileNonce24 + <> "};" + <> "const r = Dl.processDownloadedFile(fd, [" + <> jsUint8 fileEncrypted + <> "]);" + <> "const hdrBytes = F.encodeFileHeader(r.header);" + <> jsOut "new Uint8Array([...hdrBytes, ...r.content])" + tsResult `shouldBe` (fileHdr <> source) + +-- ── protocol/address ────────────────────────────────────────────── + +tsAddressTests :: Spec +tsAddressTests = describe "protocol/address" $ do + it "parseXFTPServer with port" $ do + let addr = "xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@localhost:8000" :: String + expectedKH :: B.ByteString + expectedKH = either error id $ strDecode "LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=" + result <- + callNode $ + impAddr + <> "const s = Addr.parseXFTPServer('" + <> addr + <> "');" + <> jsOut "new Uint8Array([...s.keyHash, ...new TextEncoder().encode(s.host + ':' + s.port)])" + let (kh, hostPort) = B.splitAt 32 result + kh `shouldBe` expectedKH + hostPort `shouldBe` "localhost:8000" + + it "parseXFTPServer default port" $ do + result <- + callNode $ + impAddr + <> "const s = Addr.parseXFTPServer('xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@example.com');" + <> jsOut "new TextEncoder().encode(s.host + ':' + s.port)" + result `shouldBe` "example.com:443" + + it "parseXFTPServer multi-host takes first" $ do + result <- + callNode $ + impAddr + <> "const s = Addr.parseXFTPServer('xftp://LcJUMfVhwD8yxjAiSaDzzGF3-kLG4Uh0Fl_ZIjrRwjI=@host1.com:5000,host2.com');" + <> jsOut "new TextEncoder().encode(s.host + ':' + s.port)" + result `shouldBe` "host1.com:5000" + +-- ── integration ─────────────────────────────────────────────────── + +tsIntegrationTests :: Spec +tsIntegrationTests = describe "integration" $ do + it "web handshake with Ed25519 identity verification" $ + webHandshakeTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "web handshake with Ed448 identity verification" $ + webHandshakeTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "connectXFTP + pingXFTP" $ + pingTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "full round-trip: create, upload, download, ack, addRecipients, delete" $ + fullRoundTripTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent URI round-trip" agentURIRoundTripTest + it "agent upload + download round-trip" $ + agentUploadDownloadTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent delete + verify gone" $ + agentDeleteTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "agent redirect: upload with redirect, download" $ + agentRedirectTest testXFTPServerConfigEd25519SNI "tests/fixtures/ed25519/ca.crt" + it "cross-language: TS upload, Haskell download" $ + tsUploadHaskellDownloadTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "cross-language: TS upload with redirect, Haskell download" $ + 
tsUploadRedirectHaskellDownloadTest testXFTPServerConfigSNI "tests/fixtures/ca.crt" + it "cross-language: Haskell upload, TS download" $ + haskellUploadTsDownloadTest testXFTPServerConfigSNI + +webHandshakeTest :: XFTPServerConfig -> FilePath -> Expectation +webHandshakeTest cfg caFile = do + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import http2 from 'node:http2';\ + \import crypto from 'node:crypto';\ + \import sodium from 'libsodium-wrappers-sumo';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Hs from './dist/protocol/handshake.js';\ + \import * as Id from './dist/crypto/identity.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const readBody = s => new Promise((ok, err) => {\ + \const c = [];\ + \s.on('data', d => c.push(d));\ + \s.on('end', () => ok(Buffer.concat(c)));\ + \s.on('error', err);\ + \});\ + \const client = http2.connect('https://' + server.host + ':' + server.port, {rejectUnauthorized: false});\ + \const challenge = new Uint8Array(crypto.randomBytes(32));\ + \const s1 = client.request({':method': 'POST', ':path': '/'});\ + \s1.end(Buffer.from(Hs.encodeClientHello({webChallenge: challenge})));\ + \const hs = Hs.decodeServerHandshake(new Uint8Array(await readBody(s1)));\ + \const idOk = hs.webIdentityProof\ + \ ? Id.verifyIdentityProof({certChainDer: hs.certChainDer, signedKeyDer: hs.signedKeyDer,\ + \sigBytes: hs.webIdentityProof, challenge, sessionId: hs.sessionId, keyHash: server.keyHash})\ + \ : false;\ + \const ver = hs.xftpVersionRange.maxVersion;\ + \const s2 = client.request({':method': 'POST', ':path': '/'});\ + \s2.end(Buffer.from(Hs.encodeClientHandshake({xftpVersion: ver, keyHash: server.keyHash})));\ + \const ack = await readBody(s2);\ + \client.close();" + <> jsOut "new Uint8Array([idOk ? 1 : 0, ack.length === 0 ? 1 : 0])" + result `shouldBe` B.pack [1, 1] + +pingTest :: XFTPServerConfig -> FilePath -> Expectation +pingTest cfg caFile = do + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import * as Addr from './dist/protocol/address.js';\ + \import {connectXFTP, pingXFTP, closeXFTP} from './dist/client.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const c = await connectXFTP(server);\ + \await pingXFTP(c);\ + \closeXFTP(c);" + <> jsOut "new Uint8Array([1])" + result `shouldBe` B.pack [1] + +fullRoundTripTest :: XFTPServerConfig -> FilePath -> Expectation +fullRoundTripTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as K from './dist/crypto/keys.js';\ + \import {sha256} from './dist/crypto/digest.js';\ + \import {connectXFTP, createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk,\ + \ ackXFTPChunk, addXFTPRecipients, deleteXFTPChunk, closeXFTP} from './dist/client.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const c = await connectXFTP(server);\ + \const sndKp = K.generateEd25519KeyPair();\ + \const rcvKp1 = K.generateEd25519KeyPair();\ + \const rcvKp2 = K.generateEd25519KeyPair();\ + \const chunkData = new Uint8Array(crypto.randomBytes(65536));\ + \const digest = sha256(chunkData);\ + \const file = {\ + \ sndKey: K.encodePubKeyEd25519(sndKp.publicKey),\ + \ size: chunkData.length,\ + \ digest\ + \};\ + \const rcvKeys = [K.encodePubKeyEd25519(rcvKp1.publicKey)];\ + \const {senderId, recipientIds} = await createXFTPChunk(c, sndKp.privateKey, file, rcvKeys, null);\ + \await uploadXFTPChunk(c, sndKp.privateKey, senderId, chunkData);\ + \const dl1 = await downloadXFTPChunk(c, rcvKp1.privateKey, recipientIds[0], digest);\ + \const match1 = dl1.length === chunkData.length && dl1.every((b, i) => b === chunkData[i]);\ + \await ackXFTPChunk(c, rcvKp1.privateKey, recipientIds[0]);\ + \const newIds = await addXFTPRecipients(c, sndKp.privateKey, senderId,\ + \ [K.encodePubKeyEd25519(rcvKp2.publicKey)]);\ + \const dl2 = await downloadXFTPChunk(c, rcvKp2.privateKey, newIds[0], digest);\ + \const match2 = dl2.length === chunkData.length && dl2.every((b, i) => b === chunkData[i]);\ + \await deleteXFTPChunk(c, sndKp.privateKey, senderId);\ + \closeXFTP(c);" + <> jsOut "new Uint8Array([match1 ? 1 : 0, match2 ? 1 : 0])" + result `shouldBe` B.pack [1, 1] + +agentURIRoundTripTest :: Expectation +agentURIRoundTripTest = do + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import * as Agent from './dist/agent.js';\ + \import * as Desc from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const fd = {\ + \ party: 'recipient',\ + \ size: 65536,\ + \ digest: new Uint8Array(64).fill(0xab),\ + \ key: new Uint8Array(32).fill(0x01),\ + \ nonce: new Uint8Array(24).fill(0x02),\ + \ chunkSize: 65536,\ + \ chunks: [{\ + \ chunkNo: 1,\ + \ chunkSize: 65536,\ + \ digest: new Uint8Array(32).fill(0xcd),\ + \ replicas: [{\ + \ server: 'xftp://AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=@example.com:443',\ + \ replicaId: new Uint8Array([1,2,3]),\ + \ replicaKey: new Uint8Array([48,46,2,1,0,48,5,6,3,43,101,112,4,34,4,32,\ + \ 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32])\ + \ }]\ + \ }],\ + \ redirect: null\ + \};\ + \const uri = Agent.encodeDescriptionURI(fd);\ + \const fd2 = Agent.decodeDescriptionURI(uri);\ + \const yaml1 = Desc.encodeFileDescription(fd);\ + \const yaml2 = Desc.encodeFileDescription(fd2);\ + \const match = yaml1 === yaml2 ? 1 : 0;" + <> jsOut "new Uint8Array([match])" + result `shouldBe` B.pack [1] + +agentUploadDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +agentUploadDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
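+    -- Agent round-trip under test: encryptFileForUpload produces the padded
+    -- ciphertext, uploadFile returns {rcvDescription, sndDescription, uri},
+    -- and decodeDescriptionURI recovers the description that downloadFile
+    -- consumes.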
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'test-file.bin');\ + \const {rcvDescription, sndDescription, uri} = await Agent.uploadFile(server, encrypted);\ + \const fd = Agent.decodeDescriptionURI(uri);\ + \const {header, content} = await Agent.downloadFile(fd);\ + \const nameMatch = header.fileName === 'test-file.bin' ? 1 : 0;\ + \const sizeMatch = content.length === originalData.length ? 1 : 0;\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== originalData[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1] + +agentDeleteTest :: XFTPServerConfig -> FilePath -> Expectation +agentDeleteTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'del-test.bin');\ + \const {rcvDescription, sndDescription} = await Agent.uploadFile(server, encrypted);\ + \await Agent.deleteFile(sndDescription);\ + \let deleted = 0;\ + \try {\ + \ await Agent.downloadFile(rcvDescription);\ + \} catch (e) {\ + \ deleted = 1;\ + \};" + <> jsOut "new Uint8Array([deleted])" + result `shouldBe` B.pack [1] + +agentRedirectTest :: XFTPServerConfig -> FilePath -> Expectation +agentRedirectTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const originalData = new Uint8Array(crypto.randomBytes(100000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'redirect-test.bin');\ + \const {rcvDescription, uri} = await Agent.uploadFile(server, encrypted, null, 50);\ + \const fd = Agent.decodeDescriptionURI(uri);\ + \const hasRedirect = fd.redirect !== null ? 1 : 0;\ + \const {header, content} = await Agent.downloadFile(fd);\ + \const nameMatch = header.fileName === 'redirect-test.bin' ? 1 : 0;\ + \const sizeMatch = content.length === originalData.length ? 
1 : 0;\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== originalData[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([hasRedirect, nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1, 1] + +tsUploadHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadHaskellDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False recipientFiles + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + (yamlDesc, originalData) <- + callNode2 $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \import {encodeFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const originalData = new Uint8Array(crypto.randomBytes(50000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'ts-to-hs.bin');\ + \const {rcvDescription} = await Agent.uploadFile(server, encrypted);\ + \const yaml = encodeFileDescription(rcvDescription);" + <> jsOut2 "Buffer.from(yaml)" "Buffer.from(originalData)" + let vfd :: ValidFileDescription 'FRecipient = either error id $ strDecode yamlDesc + withAgent 1 agentCfg initAgentServers testDB $ \rcp -> do + runRight_ $ xftpStartWorkers rcp (Just recipientFiles) + _ <- runRight $ xftpReceiveFile rcp 1 vfd Nothing True + rfProgress rcp 50000 + (_, _, RFDONE outPath) <- rfGet rcp + downloadedData <- B.readFile outPath + downloadedData `shouldBe` originalData + +tsUploadRedirectHaskellDownloadTest :: XFTPServerConfig -> FilePath -> Expectation +tsUploadRedirectHaskellDownloadTest cfg caFile = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False recipientFiles + withXFTPServerCfg cfg $ \_ -> do + Fingerprint fp <- loadFileFingerprint caFile + let fpStr = map (toEnum . 
fromIntegral) $ B.unpack $ strEncode fp + addr = "xftp://" <> fpStr <> "@localhost:" <> xftpTestPort + (yamlDesc, originalData) <- + callNode2 $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import crypto from 'node:crypto';\ + \import * as Addr from './dist/protocol/address.js';\ + \import * as Agent from './dist/agent.js';\ + \import {encodeFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const server = Addr.parseXFTPServer('" + <> addr + <> "');\ + \const originalData = new Uint8Array(crypto.randomBytes(100000));\ + \const encrypted = Agent.encryptFileForUpload(originalData, 'ts-redirect-to-hs.bin');\ + \const {rcvDescription} = await Agent.uploadFile(server, encrypted, null, 50);\ + \const yaml = encodeFileDescription(rcvDescription);" + <> jsOut2 "Buffer.from(yaml)" "Buffer.from(originalData)" + let vfd@(ValidFileDescription fd) :: ValidFileDescription 'FRecipient = either error id $ strDecode yamlDesc + redirect fd `shouldSatisfy` (/= Nothing) + withAgent 1 agentCfg initAgentServers testDB $ \rcp -> do + runRight_ $ xftpStartWorkers rcp (Just recipientFiles) + _ <- runRight $ xftpReceiveFile rcp 1 vfd Nothing True + outPath <- waitRfDone rcp + downloadedData <- B.readFile outPath + downloadedData `shouldBe` originalData + +haskellUploadTsDownloadTest :: XFTPServerConfig -> Expectation +haskellUploadTsDownloadTest cfg = do + createDirectoryIfMissing False "tests/tmp/xftp-server-files" + createDirectoryIfMissing False senderFiles + let filePath = senderFiles <> "/hs-to-ts.bin" + originalData <- B.pack <$> replicateM 50000 (randomIO :: IO Word8) + B.writeFile filePath originalData + withXFTPServerCfg cfg $ \_ -> do + vfd <- withAgent 1 agentCfg initAgentServers testDB $ \sndr -> do + runRight_ $ xftpStartWorkers sndr (Just senderFiles) + _ <- runRight $ xftpSendFile sndr 1 (CF.plain filePath) 1 + sfProgress sndr 50000 + (_, _, SFDONE _ [rfd]) <- sfGet sndr + pure rfd + let yamlDesc = strEncode vfd + result <- + callNode $ + "import sodium from 'libsodium-wrappers-sumo';\ + \import * as Agent from './dist/agent.js';\ + \import {decodeFileDescription, validateFileDescription} from './dist/protocol/description.js';\ + \await sodium.ready;\ + \const yaml = Buffer.from(" + <> jsUint8 yamlDesc + <> ").toString();\ + \const fd = decodeFileDescription(yaml);\ + \const err = validateFileDescription(fd);\ + \if (err) throw new Error(err);\ + \const {header, content} = await Agent.downloadFile(fd);\ + \const nameMatch = header.fileName === 'hs-to-ts.bin' ? 1 : 0;\ + \const sizeMatch = content.length === 50000 ? 
1 : 0;\ + \const expected = " + <> jsUint8 originalData + <> ";\ + \let dataMatch = 1;\ + \for (let i = 0; i < content.length; i++) {\ + \ if (content[i] !== expected[i]) { dataMatch = 0; break; }\ + \};" + <> jsOut "new Uint8Array([nameMatch, sizeMatch, dataMatch])" + result `shouldBe` B.pack [1, 1, 1] + +rfProgress :: AgentClient -> Int64 -> IO () +rfProgress c _expected = loop 0 + where + loop prev = do + (_, _, RFPROG rcvd total) <- rfGet c + when (rcvd < total && rcvd > prev) $ loop rcvd + +sfProgress :: AgentClient -> Int64 -> IO () +sfProgress c _expected = loop 0 + where + loop prev = do + (_, _, SFPROG sent total) <- sfGet c + when (sent < total && sent > prev) $ loop sent + +waitRfDone :: AgentClient -> IO FilePath +waitRfDone c = do + ev <- rfGet c + case ev of + (_, _, RFDONE outPath) -> pure outPath + (_, _, RFPROG _ _) -> waitRfDone c + (_, _, RFERR e) -> error $ "RFERR: " <> show e + _ -> error $ "Unexpected event: " <> show ev + +callNode2 :: String -> IO (B.ByteString, B.ByteString) +callNode2 script = do + out <- callNode script + let (len1Bytes, rest1) = B.splitAt 4 out + len1 = fromIntegral (B.index len1Bytes 0) + fromIntegral (B.index len1Bytes 1) * 256 + fromIntegral (B.index len1Bytes 2) * 65536 + fromIntegral (B.index len1Bytes 3) * 16777216 + (data1, rest2) = B.splitAt len1 rest1 + (len2Bytes, rest3) = B.splitAt 4 rest2 + len2 = fromIntegral (B.index len2Bytes 0) + fromIntegral (B.index len2Bytes 1) * 256 + fromIntegral (B.index len2Bytes 2) * 65536 + fromIntegral (B.index len2Bytes 3) * 16777216 + data2 = B.take len2 rest3 + pure (data1, data2) + +jsOut2 :: String -> String -> String +jsOut2 a b = "const __a = " <> a <> "; const __b = " <> b <> "; const __buf = Buffer.alloc(8 + __a.length + __b.length); __buf.writeUInt32LE(__a.length, 0); __a.copy(__buf, 4); __buf.writeUInt32LE(__b.length, 4 + __a.length); __b.copy(__buf, 8 + __a.length); process.stdout.write(__buf);" diff --git a/tests/fixtures/ed25519/ca.crt b/tests/fixtures/ed25519/ca.crt new file mode 100644 index 0000000000..d487b0de60 --- /dev/null +++ b/tests/fixtures/ed25519/ca.crt @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBazCCAR2gAwIBAgIUSTqS4QptGQWYoukUUuYqC6iV5TMwBQYDK2VwMCoxFjAU +BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjYwMjAy +MDkxMTM1WhgPMjEyNjAxMDkwOTExMzVaMCoxFjAUBgNVBAMMDVNNUCBzZXJ2ZXIg +Q0ExEDAOBgNVBAoMB1NpbXBsZVgwKjAFBgMrZXADIQAv7I91vFk1tu6bj7J8HfkA +c7vjTnae9LFz+fXXtjkJVqNTMFEwHQYDVR0OBBYEFJSRDsRRvAyWhRMrXfW0Apsw +FbIHMB8GA1UdIwQYMBaAFJSRDsRRvAyWhRMrXfW0ApswFbIHMA8GA1UdEwEB/wQF +MAMBAf8wBQYDK2VwA0EAa9btje9yq4avTR8AOOkLHvGG0F6CskcGUFCkEbdCU+7I +9Qx1E8TlK6SwtLAKGi+qoK89dsdKL7rY2KbSP3SMAg== +-----END CERTIFICATE----- diff --git a/tests/fixtures/ed25519/ca.key b/tests/fixtures/ed25519/ca.key new file mode 100644 index 0000000000..45ca424a1e --- /dev/null +++ b/tests/fixtures/ed25519/ca.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEINrfCroxhwopILZmG394xna73ethj6Z6IJSdBY2KjmW2 +-----END PRIVATE KEY----- diff --git a/tests/fixtures/ed25519/server.crt b/tests/fixtures/ed25519/server.crt new file mode 100644 index 0000000000..feaa345de5 --- /dev/null +++ b/tests/fixtures/ed25519/server.crt @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBcTCCASOgAwIBAgIUGMY4bIefHdfLBMptm/MOtg3ekGEwBQYDK2VwMCoxFjAU +BgNVBAMMDVNNUCBzZXJ2ZXIgQ0ExEDAOBgNVBAoMB1NpbXBsZVgwIBcNMjYwMjAy +MDkxMTM1WhgPMjEyNjAxMDkwOTExMzVaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDAq +MAUGAytlcAMhANYHFcaIJ540sL66lt5GmPrd0HX3mogATKrnWHPWQaGmo28wbTAJ 
+BgNVHRMEAjAAMAsGA1UdDwQEAwIDyDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV +HQ4EFgQUQlsiIdymULnrH8KY+N+dd5RQADMwHwYDVR0jBBgwFoAUlJEOxFG8DJaF +Eytd9bQCmzAVsgcwBQYDK2VwA0EAFXpm1Ucdoa4W1ZPE/28FRkoHeHiEfyHX0NFx +qz7fiV6ys6KnnlC+xLDX0HVLcppImdnm4qmKddCagRfE7h0zAw== +-----END CERTIFICATE----- diff --git a/tests/fixtures/ed25519/server.key b/tests/fixtures/ed25519/server.key new file mode 100644 index 0000000000..065e2e7e23 --- /dev/null +++ b/tests/fixtures/ed25519/server.key @@ -0,0 +1,3 @@ +-----BEGIN PRIVATE KEY----- +MC4CAQAwBQYDK2VwBCIEIJjrvSfyWU9Xdiery1u85BK0Syw5jmxIJdzo0idiIasu +-----END PRIVATE KEY----- diff --git a/xftp-web/.gitignore b/xftp-web/.gitignore new file mode 100644 index 0000000000..507b50d80c --- /dev/null +++ b/xftp-web/.gitignore @@ -0,0 +1,4 @@ +node_modules/ +dist/ +dist-web/ +package-lock.json diff --git a/xftp-web/README.md b/xftp-web/README.md new file mode 100644 index 0000000000..9b118b336a --- /dev/null +++ b/xftp-web/README.md @@ -0,0 +1,47 @@ +# xftp-web + +Browser-compatible XFTP file transfer client in TypeScript. + +## Prerequisites + +- Haskell toolchain with `cabal` (to build `xftp-server`) +- Node.js 20+ +- Chromium system dependencies (see below) + +## Setup + +```bash +# Build the XFTP server binary (from repo root) +cabal build xftp-server + +# Install JS dependencies +cd xftp-web +npm install + +# Install Chromium for Playwright (browser tests) +npx playwright install chromium +``` + +If Chromium fails to launch due to missing system libraries, install them with: + +```bash +# Requires root +npx playwright install-deps chromium +``` + +## Running tests + +```bash +# Browser round-trip test (vitest + Playwright headless Chromium) +npm run test +``` + +The browser test automatically starts an `xftp-server` instance on port 7000 via `globalSetup`, using certs from `tests/fixtures/`. + +## Build + +```bash +npm run build +``` + +Output goes to `dist/`. 
diff --git a/xftp-web/package.json b/xftp-web/package.json new file mode 100644 index 0000000000..125cef42b6 --- /dev/null +++ b/xftp-web/package.json @@ -0,0 +1,36 @@ +{ + "name": "xftp-web", + "version": "0.1.0", + "private": true, + "type": "module", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "scripts": { + "postinstall": "ln -sf ../../../libsodium-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs node_modules/libsodium-wrappers-sumo/dist/modules-sumo-esm/libsodium-sumo.mjs && npx playwright install chromium", + "build": "tsc", + "test": "vitest", + "dev": "vite --mode development", + "build:local": "vite build --mode development", + "build:prod": "vite build --mode production", + "preview": "vite preview", + "preview:prod": "vite build --mode production && vite preview", + "check:web": "tsc -p tsconfig.web.json --noEmit && tsc -p tsconfig.worker.json --noEmit", + "test:page": "playwright test test/page.spec.ts" + }, + "devDependencies": { + "@types/libsodium-wrappers-sumo": "^0.7.8", + "@types/node": "^20.0.0", + "@types/pako": "^2.0.3", + "@vitest/browser": "^3.0.0", + "@playwright/test": "^1.50.0", + "playwright": "^1.50.0", + "typescript": "^5.4.0", + "vite": "^6.0.0", + "vitest": "^3.0.0" + }, + "dependencies": { + "@noble/curves": "^1.9.7", + "libsodium-wrappers-sumo": "^0.7.13", + "pako": "^2.1.0" + } +} diff --git a/xftp-web/playwright.config.ts b/xftp-web/playwright.config.ts new file mode 100644 index 0000000000..ce32b12f49 --- /dev/null +++ b/xftp-web/playwright.config.ts @@ -0,0 +1,19 @@ +import {defineConfig} from '@playwright/test' + +export default defineConfig({ + testDir: './test', + testMatch: '**/*.spec.ts', + timeout: 60_000, + use: { + ignoreHTTPSErrors: true, + launchOptions: { + args: ['--ignore-certificate-errors'] + } + }, + webServer: { + command: 'npx vite build --mode development && npx vite preview', + url: 'http://localhost:4173', + reuseExistingServer: !process.env.CI + }, + globalSetup: './test/globalSetup.ts' +}) diff --git a/xftp-web/src/agent.ts b/xftp-web/src/agent.ts new file mode 100644 index 0000000000..8bea8c2aef --- /dev/null +++ b/xftp-web/src/agent.ts @@ -0,0 +1,388 @@ +// XFTP upload/download orchestration + URI encoding — Simplex.FileTransfer.Client.Main +// +// Combines all building blocks: encryption, chunking, XFTP client commands, +// file descriptions, and DEFLATE-compressed URI encoding. 
+ +import pako from "pako" +import {encryptFile, encodeFileHeader} from "./crypto/file.js" +import {generateEd25519KeyPair, encodePubKeyEd25519, encodePrivKeyEd25519, decodePrivKeyEd25519, ed25519KeyPairFromSeed} from "./crypto/keys.js" +import {sha512} from "./crypto/digest.js" +import {prepareChunkSizes, prepareChunkSpecs, getChunkDigest, fileSizeLen, authTagSize} from "./protocol/chunks.js" +import { + encodeFileDescription, decodeFileDescription, validateFileDescription, + base64urlEncode, base64urlDecode, + type FileDescription +} from "./protocol/description.js" +import type {FileInfo} from "./protocol/commands.js" +import { + getXFTPServerClient, createXFTPChunk, uploadXFTPChunk, downloadXFTPChunk, downloadXFTPChunkRaw, + ackXFTPChunk, deleteXFTPChunk, type XFTPClientAgent +} from "./client.js" +export {newXFTPAgent, closeXFTPAgent, type XFTPClientAgent} from "./client.js" +import {processDownloadedFile, decryptReceivedChunk} from "./download.js" +import type {XFTPServer} from "./protocol/address.js" +import {formatXFTPServer, parseXFTPServer} from "./protocol/address.js" +import {concatBytes} from "./protocol/encoding.js" +import type {FileHeader} from "./crypto/file.js" + +// ── Types ─────────────────────────────────────────────────────── + +interface SentChunk { + chunkNo: number + senderId: Uint8Array + senderKey: Uint8Array // 64B libsodium Ed25519 private key + recipientId: Uint8Array + recipientKey: Uint8Array // 64B libsodium Ed25519 private key + chunkSize: number + digest: Uint8Array // SHA-256 + server: XFTPServer +} + +export interface EncryptedFileMetadata { + digest: Uint8Array // SHA-512 of encData + key: Uint8Array // 32B SbKey + nonce: Uint8Array // 24B CbNonce + chunkSizes: number[] +} + +export interface EncryptedFileInfo extends EncryptedFileMetadata { + encData: Uint8Array +} + +export interface UploadResult { + rcvDescription: FileDescription + sndDescription: FileDescription + uri: string // base64url-encoded compressed YAML (no leading #) +} + +export interface DownloadResult { + header: FileHeader + content: Uint8Array +} + +// ── URI encoding/decoding (RFC §4.1: DEFLATE + base64url) ─────── + +export function encodeDescriptionURI(fd: FileDescription): string { + const yaml = encodeFileDescription(fd) + const compressed = pako.deflateRaw(new TextEncoder().encode(yaml)) + return base64urlEncode(compressed) +} + +export function decodeDescriptionURI(fragment: string): FileDescription { + const compressed = base64urlDecode(fragment) + const yaml = new TextDecoder().decode(pako.inflateRaw(compressed)) + const fd = decodeFileDescription(yaml) + const err = validateFileDescription(fd) + if (err) throw new Error("decodeDescriptionURI: " + err) + return fd +} + +// ── Upload ────────────────────────────────────────────────────── + +export function encryptFileForUpload(source: Uint8Array, fileName: string): EncryptedFileInfo { + const key = new Uint8Array(32) + const nonce = new Uint8Array(24) + crypto.getRandomValues(key) + crypto.getRandomValues(nonce) + const fileHdr = encodeFileHeader({fileName, fileExtra: null}) + const fileSize = BigInt(fileHdr.length + source.length) + const payloadSize = Number(fileSize) + fileSizeLen + authTagSize + const chunkSizes = prepareChunkSizes(payloadSize) + const encSize = BigInt(chunkSizes.reduce((a, b) => a + b, 0)) + const encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize) + const digest = sha512(encData) + return {encData, digest, key, nonce, chunkSizes} +} + +const DEFAULT_REDIRECT_THRESHOLD = 400 + +export 
interface UploadOptions { + onProgress?: (uploaded: number, total: number) => void + redirectThreshold?: number + readChunk?: (offset: number, size: number) => Promise +} + +export async function uploadFile( + agent: XFTPClientAgent, + server: XFTPServer, + encrypted: EncryptedFileMetadata, + options?: UploadOptions +): Promise { + const {onProgress, redirectThreshold, readChunk: readChunkOpt} = options ?? {} + const readChunk: (offset: number, size: number) => Promise = readChunkOpt + ? readChunkOpt + : ('encData' in encrypted + ? (off, sz) => Promise.resolve((encrypted as EncryptedFileInfo).encData.subarray(off, off + sz)) + : () => { throw new Error("uploadFile: readChunk required when encData is absent") }) + const total = encrypted.chunkSizes.reduce((a, b) => a + b, 0) + const specs = prepareChunkSpecs(encrypted.chunkSizes) + const client = await getXFTPServerClient(agent, server) + const sentChunks: SentChunk[] = [] + let uploaded = 0 + for (let i = 0; i < specs.length; i++) { + const spec = specs[i] + const chunkNo = i + 1 + const sndKp = generateEd25519KeyPair() + const rcvKp = generateEd25519KeyPair() + const chunkData = await readChunk(spec.chunkOffset, spec.chunkSize) + const chunkDigest = getChunkDigest(chunkData) + const fileInfo: FileInfo = { + sndKey: encodePubKeyEd25519(sndKp.publicKey), + size: spec.chunkSize, + digest: chunkDigest + } + const {senderId, recipientIds} = await createXFTPChunk( + client, sndKp.privateKey, fileInfo, [encodePubKeyEd25519(rcvKp.publicKey)] + ) + await uploadXFTPChunk(client, sndKp.privateKey, senderId, chunkData) + sentChunks.push({ + chunkNo, senderId, senderKey: sndKp.privateKey, + recipientId: recipientIds[0], recipientKey: rcvKp.privateKey, + chunkSize: spec.chunkSize, digest: chunkDigest, server + }) + uploaded += spec.chunkSize + onProgress?.(uploaded, total) + } + const rcvDescription = buildDescription("recipient", encrypted, sentChunks) + const sndDescription = buildDescription("sender", encrypted, sentChunks) + let uri = encodeDescriptionURI(rcvDescription) + let finalRcvDescription = rcvDescription + const threshold = redirectThreshold ?? DEFAULT_REDIRECT_THRESHOLD + if (uri.length > threshold && sentChunks.length > 1) { + finalRcvDescription = await uploadRedirectDescription(agent, server, rcvDescription) + uri = encodeDescriptionURI(finalRcvDescription) + } + return {rcvDescription: finalRcvDescription, sndDescription, uri} +} + +function buildDescription( + party: "recipient" | "sender", + enc: EncryptedFileMetadata, + chunks: SentChunk[] +): FileDescription { + const defChunkSize = enc.chunkSizes[0] + return { + party, + size: enc.chunkSizes.reduce((a, b) => a + b, 0), + digest: enc.digest, + key: enc.key, + nonce: enc.nonce, + chunkSize: defChunkSize, + chunks: chunks.map(c => ({ + chunkNo: c.chunkNo, + chunkSize: c.chunkSize, + digest: c.digest, + replicas: [{ + server: formatXFTPServer(c.server), + replicaId: party === "recipient" ? c.recipientId : c.senderId, + replicaKey: encodePrivKeyEd25519(party === "recipient" ? 
c.recipientKey : c.senderKey) + }] + })), + redirect: null + } +} + +async function uploadRedirectDescription( + agent: XFTPClientAgent, + server: XFTPServer, + innerFd: FileDescription +): Promise { + const client = await getXFTPServerClient(agent, server) + const yaml = encodeFileDescription(innerFd) + const yamlBytes = new TextEncoder().encode(yaml) + const enc = encryptFileForUpload(yamlBytes, "") + const specs = prepareChunkSpecs(enc.chunkSizes) + const sentChunks: SentChunk[] = [] + for (let i = 0; i < specs.length; i++) { + const spec = specs[i] + const chunkNo = i + 1 + const sndKp = generateEd25519KeyPair() + const rcvKp = generateEd25519KeyPair() + const chunkData = enc.encData.subarray(spec.chunkOffset, spec.chunkOffset + spec.chunkSize) + const chunkDigest = getChunkDigest(chunkData) + const fileInfo: FileInfo = { + sndKey: encodePubKeyEd25519(sndKp.publicKey), + size: spec.chunkSize, + digest: chunkDigest + } + const {senderId, recipientIds} = await createXFTPChunk( + client, sndKp.privateKey, fileInfo, [encodePubKeyEd25519(rcvKp.publicKey)] + ) + await uploadXFTPChunk(client, sndKp.privateKey, senderId, chunkData) + sentChunks.push({ + chunkNo, senderId, senderKey: sndKp.privateKey, + recipientId: recipientIds[0], recipientKey: rcvKp.privateKey, + chunkSize: spec.chunkSize, digest: chunkDigest, server + }) + } + return { + party: "recipient", + size: enc.chunkSizes.reduce((a, b) => a + b, 0), + digest: enc.digest, + key: enc.key, + nonce: enc.nonce, + chunkSize: enc.chunkSizes[0], + chunks: sentChunks.map(c => ({ + chunkNo: c.chunkNo, + chunkSize: c.chunkSize, + digest: c.digest, + replicas: [{ + server: formatXFTPServer(c.server), + replicaId: c.recipientId, + replicaKey: encodePrivKeyEd25519(c.recipientKey) + }] + })), + redirect: {size: innerFd.size, digest: innerFd.digest} + } +} + +// ── Download ──────────────────────────────────────────────────── + +export interface RawDownloadedChunk { + chunkNo: number + dhSecret: Uint8Array + nonce: Uint8Array + body: Uint8Array + digest: Uint8Array +} + +export interface DownloadRawOptions { + onProgress?: (downloaded: number, total: number) => void + concurrency?: number +} + +export async function downloadFileRaw( + agent: XFTPClientAgent, + fd: FileDescription, + onRawChunk: (chunk: RawDownloadedChunk) => Promise, + options?: DownloadRawOptions +): Promise { + const err = validateFileDescription(fd) + if (err) throw new Error("downloadFileRaw: " + err) + const {onProgress, concurrency = 1} = options ?? 
{} + // Resolve redirect on main thread (redirect data is small) + if (fd.redirect !== null) { + fd = await resolveRedirect(agent, fd) + } + const resolvedFd = fd + // Pre-connect to avoid race condition under concurrency + const servers = new Set(resolvedFd.chunks.map(c => c.replicas[0]?.server).filter(Boolean) as string[]) + for (const s of servers) { + await getXFTPServerClient(agent, parseXFTPServer(s)) + } + // Sliding-window parallel download + let downloaded = 0 + const queue = resolvedFd.chunks.slice() + let idx = 0 + async function worker() { + while (idx < queue.length) { + const i = idx++ + const chunk = queue[i] + const replica = chunk.replicas[0] + if (!replica) throw new Error("downloadFileRaw: chunk has no replicas") + const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server)) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + const raw = await downloadXFTPChunkRaw(client, kp.privateKey, replica.replicaId) + await onRawChunk({ + chunkNo: chunk.chunkNo, + dhSecret: raw.dhSecret, + nonce: raw.nonce, + body: raw.body, + digest: chunk.digest + }) + downloaded += chunk.chunkSize + onProgress?.(downloaded, resolvedFd.size) + } + } + const workers = Array.from({length: Math.min(concurrency, queue.length)}, () => worker()) + await Promise.all(workers) + return resolvedFd +} + +export async function ackFileChunks( + agent: XFTPClientAgent, fd: FileDescription +): Promise { + for (const chunk of fd.chunks) { + const replica = chunk.replicas[0] + if (!replica) continue + try { + const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server)) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + await ackXFTPChunk(client, kp.privateKey, replica.replicaId) + } catch (_) {} + } +} + +export async function downloadFile( + agent: XFTPClientAgent, + fd: FileDescription, + onProgress?: (downloaded: number, total: number) => void +): Promise { + const chunks: Uint8Array[] = [] + const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => { + chunks[raw.chunkNo - 1] = decryptReceivedChunk( + raw.dhSecret, raw.nonce, raw.body, raw.digest + ) + }, {onProgress}) + const combined = chunks.length === 1 ? chunks[0] : concatBytes(...chunks) + if (combined.length !== resolvedFd.size) throw new Error("downloadFile: file size mismatch") + const digest = sha512(combined) + if (!digestEqual(digest, resolvedFd.digest)) throw new Error("downloadFile: file digest mismatch") + const result = processDownloadedFile(resolvedFd, chunks) + await ackFileChunks(agent, resolvedFd) + return result +} + +async function resolveRedirect( + agent: XFTPClientAgent, + fd: FileDescription +): Promise { + const plaintextChunks: Uint8Array[] = new Array(fd.chunks.length) + for (const chunk of fd.chunks) { + const replica = chunk.replicas[0] + if (!replica) throw new Error("resolveRedirect: chunk has no replicas") + const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server)) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + const data = await downloadXFTPChunk(client, kp.privateKey, replica.replicaId, chunk.digest) + plaintextChunks[chunk.chunkNo - 1] = data + } + const totalSize = plaintextChunks.reduce((s, c) => s + c.length, 0) + if (totalSize !== fd.size) throw new Error("resolveRedirect: redirect file size mismatch") + const combined = plaintextChunks.length === 1 ? 
plaintextChunks[0] : concatBytes(...plaintextChunks) + const digest = sha512(combined) + if (!digestEqual(digest, fd.digest)) throw new Error("resolveRedirect: redirect file digest mismatch") + const {content: yamlBytes} = processDownloadedFile(fd, plaintextChunks) + const innerFd = decodeFileDescription(new TextDecoder().decode(yamlBytes)) + const innerErr = validateFileDescription(innerFd) + if (innerErr) throw new Error("resolveRedirect: inner description invalid: " + innerErr) + if (innerFd.size !== fd.redirect!.size) throw new Error("resolveRedirect: redirect size mismatch") + if (!digestEqual(innerFd.digest, fd.redirect!.digest)) throw new Error("resolveRedirect: redirect digest mismatch") + // ACK redirect chunks (best-effort) + await ackFileChunks(agent, fd) + return innerFd +} + +// ── Delete ────────────────────────────────────────────────────── + +export async function deleteFile(agent: XFTPClientAgent, sndDescription: FileDescription): Promise { + for (const chunk of sndDescription.chunks) { + const replica = chunk.replicas[0] + if (!replica) throw new Error("deleteFile: chunk has no replicas") + const client = await getXFTPServerClient(agent, parseXFTPServer(replica.server)) + const seed = decodePrivKeyEd25519(replica.replicaKey) + const kp = ed25519KeyPairFromSeed(seed) + await deleteXFTPChunk(client, kp.privateKey, replica.replicaId) + } +} + +// ── Internal ──────────────────────────────────────────────────── + +function digestEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/client.ts b/xftp-web/src/client.ts new file mode 100644 index 0000000000..602ce646d4 --- /dev/null +++ b/xftp-web/src/client.ts @@ -0,0 +1,262 @@ +// XFTP HTTP/2 client — Simplex.FileTransfer.Client +// +// Connects to XFTP server via HTTP/2, performs web handshake, +// sends authenticated commands, receives responses. +// +// Uses node:http2 in Node.js (tests), fetch() in browsers. 
+ +import { + encodeAuthTransmission, encodeTransmission, decodeTransmission, + XFTP_BLOCK_SIZE, initialXFTPVersion, currentXFTPVersion +} from "./protocol/transmission.js" +import { + encodeClientHello, encodeClientHandshake, decodeServerHandshake, + compatibleVRange +} from "./protocol/handshake.js" +import {verifyIdentityProof} from "./crypto/identity.js" +import {generateX25519KeyPair, encodePubKeyX25519, dh} from "./crypto/keys.js" +import { + encodeFNEW, encodeFADD, encodeFPUT, encodeFGET, encodeFDEL, encodeFACK, encodePING, + decodeResponse, type FileResponse, type FileInfo +} from "./protocol/commands.js" +import {decryptReceivedChunk} from "./download.js" +import type {XFTPServer} from "./protocol/address.js" +import {concatBytes} from "./protocol/encoding.js" + +// ── Types ───────────────────────────────────────────────────────── + +export interface XFTPClient { + baseUrl: string + sessionId: Uint8Array + xftpVersion: number + transport: Transport +} + +interface Transport { + post(body: Uint8Array): Promise + close(): void +} + +// ── Transport implementations ───────────────────────────────────── + +const isNode = typeof globalThis.process !== "undefined" && globalThis.process.versions?.node + +async function createTransport(baseUrl: string): Promise { + if (isNode) { + return createNodeTransport(baseUrl) + } else { + return createBrowserTransport(baseUrl) + } +} + +async function createNodeTransport(baseUrl: string): Promise { + const http2 = await import("node:http2") + const session = http2.connect(baseUrl, {rejectUnauthorized: false}) + return { + async post(body: Uint8Array): Promise { + return new Promise((resolve, reject) => { + const req = session.request({":method": "POST", ":path": "/"}) + const chunks: Buffer[] = [] + req.on("data", (chunk: Buffer) => chunks.push(chunk)) + req.on("end", () => resolve(new Uint8Array(Buffer.concat(chunks)))) + req.on("error", reject) + req.end(Buffer.from(body)) + }) + }, + close() { + session.close() + } + } +} + +function createBrowserTransport(baseUrl: string): Transport { + return { + async post(body: Uint8Array): Promise { + const resp = await fetch(baseUrl, { + method: "POST", + body, + duplex: "half", + } as RequestInit) + if (!resp.ok) throw new Error(`fetch failed: ${resp.status}`) + return new Uint8Array(await resp.arrayBuffer()) + }, + close() {} + } +} + +// ── Client agent (connection pool) ─────────────────────────────── + +export interface XFTPClientAgent { + clients: Map +} + +export function newXFTPAgent(): XFTPClientAgent { + return {clients: new Map()} +} + +export async function getXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): Promise { + const key = "https://" + server.host + ":" + server.port + let c = agent.clients.get(key) + if (!c) { + c = await connectXFTP(server) + agent.clients.set(key, c) + } + return c +} + +export function closeXFTPServerClient(agent: XFTPClientAgent, server: XFTPServer): void { + const key = "https://" + server.host + ":" + server.port + const c = agent.clients.get(key) + if (c) { + agent.clients.delete(key) + c.transport.close() + } +} + +export function closeXFTPAgent(agent: XFTPClientAgent): void { + for (const c of agent.clients.values()) c.transport.close() + agent.clients.clear() +} + +// ── Connect + handshake ─────────────────────────────────────────── + +export async function connectXFTP(server: XFTPServer): Promise { + const baseUrl = "https://" + server.host + ":" + server.port + const transport = await createTransport(baseUrl) + + try { + // Step 1: send 
client hello with web challenge + const challenge = new Uint8Array(32) + crypto.getRandomValues(challenge) + const shsBody = await transport.post(encodeClientHello({webChallenge: challenge})) + + // Step 2: decode + verify server handshake + const hs = decodeServerHandshake(shsBody) + if (!hs.webIdentityProof) throw new Error("connectXFTP: no web identity proof") + const idOk = verifyIdentityProof({ + certChainDer: hs.certChainDer, + signedKeyDer: hs.signedKeyDer, + sigBytes: hs.webIdentityProof, + challenge, + sessionId: hs.sessionId, + keyHash: server.keyHash + }) + if (!idOk) throw new Error("connectXFTP: identity verification failed") + + // Step 3: version negotiation + const vr = compatibleVRange(hs.xftpVersionRange, {minVersion: initialXFTPVersion, maxVersion: currentXFTPVersion}) + if (!vr) throw new Error("connectXFTP: incompatible version") + const xftpVersion = vr.maxVersion + + // Step 4: send client handshake + const ack = await transport.post(encodeClientHandshake({xftpVersion, keyHash: server.keyHash})) + if (ack.length !== 0) throw new Error("connectXFTP: non-empty handshake ack") + + return {baseUrl, sessionId: hs.sessionId, xftpVersion, transport} + } catch (e) { + transport.close() + throw e + } +} + +// ── Send command ────────────────────────────────────────────────── + +async function sendXFTPCommand( + client: XFTPClient, + privateKey: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + chunkData?: Uint8Array +): Promise<{response: FileResponse, body: Uint8Array}> { + const corrId = new Uint8Array(0) + const block = encodeAuthTransmission(client.sessionId, corrId, entityId, cmdBytes, privateKey) + const reqBody = chunkData ? concatBytes(block, chunkData) : block + const fullResp = await client.transport.post(reqBody) + if (fullResp.length < XFTP_BLOCK_SIZE) throw new Error("sendXFTPCommand: response too short") + const respBlock = fullResp.subarray(0, XFTP_BLOCK_SIZE) + const body = fullResp.subarray(XFTP_BLOCK_SIZE) + const {command} = decodeTransmission(client.sessionId, respBlock) + const response = decodeResponse(command) + if (response.type === "FRErr") throw new Error("XFTP error: " + response.err.type) + return {response, body} +} + +// ── Command wrappers ────────────────────────────────────────────── + +export async function createXFTPChunk( + c: XFTPClient, spKey: Uint8Array, file: FileInfo, + rcvKeys: Uint8Array[], auth: Uint8Array | null = null +): Promise<{senderId: Uint8Array, recipientIds: Uint8Array[]}> { + const {response} = await sendXFTPCommand(c, spKey, new Uint8Array(0), encodeFNEW(file, rcvKeys, auth)) + if (response.type !== "FRSndIds") throw new Error("unexpected response: " + response.type) + return {senderId: response.senderId, recipientIds: response.recipientIds} +} + +export async function addXFTPRecipients( + c: XFTPClient, spKey: Uint8Array, fId: Uint8Array, rcvKeys: Uint8Array[] +): Promise { + const {response} = await sendXFTPCommand(c, spKey, fId, encodeFADD(rcvKeys)) + if (response.type !== "FRRcvIds") throw new Error("unexpected response: " + response.type) + return response.recipientIds +} + +export async function uploadXFTPChunk( + c: XFTPClient, spKey: Uint8Array, fId: Uint8Array, chunkData: Uint8Array +): Promise { + const {response} = await sendXFTPCommand(c, spKey, fId, encodeFPUT(), chunkData) + if (response.type !== "FROk") throw new Error("unexpected response: " + response.type) +} + +export interface RawChunkResponse { + dhSecret: Uint8Array + nonce: Uint8Array + body: Uint8Array +} + +export async function 
downloadXFTPChunkRaw( + c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array +): Promise { + const {publicKey, privateKey} = generateX25519KeyPair() + const cmd = encodeFGET(encodePubKeyX25519(publicKey)) + const {response, body} = await sendXFTPCommand(c, rpKey, fId, cmd) + if (response.type !== "FRFile") throw new Error("unexpected response: " + response.type) + const dhSecret = dh(response.rcvDhKey, privateKey) + return {dhSecret, nonce: response.nonce, body} +} + +export async function downloadXFTPChunk( + c: XFTPClient, rpKey: Uint8Array, fId: Uint8Array, digest?: Uint8Array +): Promise { + const {dhSecret, nonce, body} = await downloadXFTPChunkRaw(c, rpKey, fId) + return decryptReceivedChunk(dhSecret, nonce, body, digest ?? null) +} + +export async function deleteXFTPChunk( + c: XFTPClient, spKey: Uint8Array, sId: Uint8Array +): Promise { + const {response} = await sendXFTPCommand(c, spKey, sId, encodeFDEL()) + if (response.type !== "FROk") throw new Error("unexpected response: " + response.type) +} + +export async function ackXFTPChunk( + c: XFTPClient, rpKey: Uint8Array, rId: Uint8Array +): Promise { + const {response} = await sendXFTPCommand(c, rpKey, rId, encodeFACK()) + if (response.type !== "FROk") throw new Error("unexpected response: " + response.type) +} + +export async function pingXFTP(c: XFTPClient): Promise { + const corrId = new Uint8Array(0) + const block = encodeTransmission(c.sessionId, corrId, new Uint8Array(0), encodePING()) + const fullResp = await c.transport.post(block) + if (fullResp.length < XFTP_BLOCK_SIZE) throw new Error("pingXFTP: response too short") + const {command} = decodeTransmission(c.sessionId, fullResp.subarray(0, XFTP_BLOCK_SIZE)) + const response = decodeResponse(command) + if (response.type !== "FRPong") throw new Error("unexpected response: " + response.type) +} + +// ── Close ───────────────────────────────────────────────────────── + +export function closeXFTP(c: XFTPClient): void { + c.transport.close() +} diff --git a/xftp-web/src/crypto/digest.ts b/xftp-web/src/crypto/digest.ts new file mode 100644 index 0000000000..1c63b26b47 --- /dev/null +++ b/xftp-web/src/crypto/digest.ts @@ -0,0 +1,13 @@ +// Cryptographic hash functions matching Simplex.Messaging.Crypto (sha256Hash, sha512Hash). + +import sodium from "libsodium-wrappers-sumo" + +// SHA-256 digest (32 bytes) — Crypto.hs:1006 +export function sha256(data: Uint8Array): Uint8Array { + return sodium.crypto_hash_sha256(data) +} + +// SHA-512 digest (64 bytes) — Crypto.hs:1011 +export function sha512(data: Uint8Array): Uint8Array { + return sodium.crypto_hash_sha512(data) +} diff --git a/xftp-web/src/crypto/file.ts b/xftp-web/src/crypto/file.ts new file mode 100644 index 0000000000..e4e9bb3bc3 --- /dev/null +++ b/xftp-web/src/crypto/file.ts @@ -0,0 +1,94 @@ +// File-level encryption/decryption matching Simplex.FileTransfer.Crypto. +// Operates on in-memory Uint8Array (no file I/O needed for browser). 
+ +import {Decoder, concatBytes, encodeInt64, encodeString, decodeString, encodeMaybe, decodeMaybe} from "../protocol/encoding.js" +import {sbInit, sbEncryptChunk, sbDecryptTailTag, sbAuth} from "./secretbox.js" + +const AUTH_TAG_SIZE = 16n + +// ── FileHeader ────────────────────────────────────────────────── + +export interface FileHeader { + fileName: string + fileExtra: string | null +} + +// Encoding matches Haskell: smpEncode (fileName, fileExtra) +// = smpEncode fileName <> smpEncode fileExtra +// = encodeString(fileName) + encodeMaybe(encodeString, fileExtra) +export function encodeFileHeader(hdr: FileHeader): Uint8Array { + return concatBytes( + encodeString(hdr.fileName), + encodeMaybe(encodeString, hdr.fileExtra) + ) +} + +// Parse FileHeader from decrypted content (first 1024 bytes examined). +// Returns the parsed header and remaining bytes (file content). +export function parseFileHeader(data: Uint8Array): {header: FileHeader, rest: Uint8Array} { + const hdrLen = Math.min(1024, data.length) + const d = new Decoder(data.subarray(0, hdrLen)) + const fileName = decodeString(d) + const fileExtra = decodeMaybe(decodeString, d) + const consumed = d.offset() + return { + header: {fileName, fileExtra}, + rest: data.subarray(consumed) + } +} + +// ── Encryption (FileTransfer.Crypto:encryptFile) ──────────────── + +// Encrypt file content with streaming XSalsa20-Poly1305. +// Output format: encrypted(Int64 fileSize | fileHdr | source | '#' padding) | 16-byte auth tag +// +// source — raw file content +// fileHdr — pre-encoded FileHeader bytes (from encodeFileHeader) +// key — 32-byte symmetric key +// nonce — 24-byte nonce +// fileSize — BigInt(fileHdr.length + source.length) +// encSize — total output size (including 16-byte auth tag) +export function encryptFile( + source: Uint8Array, + fileHdr: Uint8Array, + key: Uint8Array, + nonce: Uint8Array, + fileSize: bigint, + encSize: bigint +): Uint8Array { + const state = sbInit(key, nonce) + const lenStr = encodeInt64(fileSize) + const padLen = Number(encSize - AUTH_TAG_SIZE - fileSize - 8n) + if (padLen < 0) throw new Error("encryptFile: encSize too small") + const hdr = sbEncryptChunk(state, concatBytes(lenStr, fileHdr)) + const encSource = sbEncryptChunk(state, source) + const padding = new Uint8Array(padLen) + padding.fill(0x23) // '#' + const encPad = sbEncryptChunk(state, padding) + const tag = sbAuth(state) + return concatBytes(hdr, encSource, encPad, tag) +} + +// ── Decryption (FileTransfer.Crypto:decryptChunks) ────────────── + +// Decrypt one or more XFTP chunks into a FileHeader and file content. +// Chunks are concatenated, then decrypted as a single stream. +// +// encSize — total encrypted size (including 16-byte auth tag) +// chunks — downloaded XFTP chunk data (concatenated = full encrypted file) +// key — 32-byte symmetric key +// nonce — 24-byte nonce +export function decryptChunks( + encSize: bigint, + chunks: Uint8Array[], + key: Uint8Array, + nonce: Uint8Array +): {header: FileHeader, content: Uint8Array} { + if (chunks.length === 0) throw new Error("decryptChunks: empty chunks") + const paddedLen = encSize - AUTH_TAG_SIZE + const data = chunks.length === 1 ? 
chunks[0] : concatBytes(...chunks) + const {valid, content} = sbDecryptTailTag(key, nonce, paddedLen, data) + if (!valid) throw new Error("decryptChunks: invalid auth tag") + const {header, rest} = parseFileHeader(content) + return {header, content: rest} +} diff --git a/xftp-web/src/crypto/identity.ts b/xftp-web/src/crypto/identity.ts new file mode 100644 index 0000000000..796547c986 --- /dev/null +++ b/xftp-web/src/crypto/identity.ts @@ -0,0 +1,112 @@ +// Web handshake identity proof verification. +// +// Verifies server identity in the XFTP web handshake using the certificate +// chain from the protocol handshake (independent of TLS certificates). +// Ed25519 via libsodium, Ed448 via @noble/curves. + +import {Decoder, concatBytes} from "../protocol/encoding.js" +import {sha256} from "./digest.js" +import {verify, decodePubKeyEd25519, verifyEd448, decodePubKeyEd448} from "./keys.js" +import {chainIdCaCerts, extractSignedKey} from "../protocol/handshake.js" + +// ── ASN.1 DER helpers (minimal, for X.509 parsing) ───────────────── + +function derLen(d: Decoder): number { + const first = d.anyByte() + if (first < 0x80) return first + const n = first & 0x7f + if (n === 0 || n > 4) throw new Error("DER: unsupported length encoding") + let len = 0 + for (let i = 0; i < n; i++) len = (len << 8) | d.anyByte() + return len +} + +function derSkip(d: Decoder): void { + d.anyByte() + d.take(derLen(d)) +} + +function derReadElement(d: Decoder): Uint8Array { + const start = d.offset() + d.anyByte() + d.take(derLen(d)) + return d.buf.subarray(start, d.offset()) +} + +// ── X.509 certificate public key extraction ───────────────────────── + +// Extract SubjectPublicKeyInfo DER from a full X.509 certificate DER. +// Navigates: Certificate → TBSCertificate → skip version, serialNumber, +// signatureAlg, issuer, validity, subject → SubjectPublicKeyInfo. +export function extractCertPublicKeyInfo(certDer: Uint8Array): Uint8Array { + const d = new Decoder(certDer) + if (d.anyByte() !== 0x30) throw new Error("X.509: expected Certificate SEQUENCE") + derLen(d) + if (d.anyByte() !== 0x30) throw new Error("X.509: expected TBSCertificate SEQUENCE") + derLen(d) + if (d.buf[d.offset()] === 0xa0) derSkip(d) // version [0] EXPLICIT (optional) + derSkip(d) // serialNumber + derSkip(d) // signature AlgorithmIdentifier + derSkip(d) // issuer + derSkip(d) // validity + derSkip(d) // subject + return derReadElement(d) // SubjectPublicKeyInfo +} + +// Detect certificate key algorithm from SPKI DER prefix. +// Ed25519 OID 1.3.101.112: byte 8 = 0x70, SPKI = 44 bytes +// Ed448 OID 1.3.101.113: byte 8 = 0x71, SPKI = 69 bytes +type CertKeyAlgorithm = 'ed25519' | 'ed448' + +function detectKeyAlgorithm(spki: Uint8Array): CertKeyAlgorithm { + if (spki.length === 44 && spki[8] === 0x70) return 'ed25519' + if (spki.length === 69 && spki[8] === 0x71) return 'ed448' + throw new Error("unsupported certificate key algorithm") +} + +// Extract raw public key from SPKI DER, auto-detecting Ed25519 or Ed448. +function extractCertRawKey(spki: Uint8Array): {key: Uint8Array, alg: CertKeyAlgorithm} { + const alg = detectKeyAlgorithm(spki) + const key = alg === 'ed25519' ? decodePubKeyEd25519(spki) : decodePubKeyEd448(spki) + return {key, alg} +} + +// Verify signature using the appropriate algorithm. +function verifySig(alg: CertKeyAlgorithm, key: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + return alg === 'ed25519' ? 
verify(key, sig, msg) : verifyEd448(key, sig, msg) +} + +// ── Identity proof verification ───────────────────────────────────── + +export interface IdentityVerification { + certChainDer: Uint8Array[] + signedKeyDer: Uint8Array + sigBytes: Uint8Array + challenge: Uint8Array + sessionId: Uint8Array + keyHash: Uint8Array +} + +// Verify server identity proof from XFTP web handshake. +// 1. Certificate chain has valid structure (2-4 certs) +// 2. SHA-256(idCert) matches expected keyHash +// 3. Challenge signature valid: verify(leafKey, sigBytes, challenge || sessionId) +// 4. DH key signature valid: verify(leafKey, signedKey.signature, signedKey.objectDer) +export function verifyIdentityProof(v: IdentityVerification): boolean { + const cc = chainIdCaCerts(v.certChainDer) + if (cc.type !== 'valid') return false + const fp = sha256(cc.idCert) + if (!constantTimeEqual(fp, v.keyHash)) return false + const spki = extractCertPublicKeyInfo(cc.leafCert) + const {key, alg} = extractCertRawKey(spki) + if (!verifySig(alg, key, v.sigBytes, concatBytes(v.challenge, v.sessionId))) return false + const sk = extractSignedKey(v.signedKeyDer) + return verifySig(alg, key, sk.signature, sk.objectDer) +} + +function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/crypto/keys.ts b/xftp-web/src/crypto/keys.ts new file mode 100644 index 0000000000..fccb0d5030 --- /dev/null +++ b/xftp-web/src/crypto/keys.ts @@ -0,0 +1,172 @@ +// Key generation, signing, DH — Simplex.Messaging.Crypto (Ed25519/X25519/Ed448 functions). + +import sodium from "libsodium-wrappers-sumo" +import {ed448} from "@noble/curves/ed448" +import {sha256} from "./digest.js" +import {concatBytes} from "../protocol/encoding.js" + +// -- Ed25519 key generation (Crypto.hs:726 generateAuthKeyPair) + +export interface Ed25519KeyPair { + publicKey: Uint8Array // 32 bytes raw + privateKey: Uint8Array // 64 bytes (libsodium: seed || pubkey) +} + +export function generateEd25519KeyPair(): Ed25519KeyPair { + const kp = sodium.crypto_sign_keypair() + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// Generate from known 32-byte seed (deterministic, for testing/interop). +export function ed25519KeyPairFromSeed(seed: Uint8Array): Ed25519KeyPair { + const kp = sodium.crypto_sign_seed_keypair(seed) + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// -- X25519 key generation (Crypto.hs via generateKeyPair) + +export interface X25519KeyPair { + publicKey: Uint8Array // 32 bytes + privateKey: Uint8Array // 32 bytes +} + +export function generateX25519KeyPair(): X25519KeyPair { + const kp = sodium.crypto_box_keypair() + return {publicKey: kp.publicKey, privateKey: kp.privateKey} +} + +// Derive X25519 keypair from raw 32-byte private key. 
+export function x25519KeyPairFromPrivate(privateKey: Uint8Array): X25519KeyPair { + const publicKey = sodium.crypto_scalarmult_base(privateKey) + return {publicKey, privateKey} +} + +// -- Ed25519 signing (Crypto.hs:1175 sign') + +export function sign(privateKey: Uint8Array, msg: Uint8Array): Uint8Array { + return sodium.crypto_sign_detached(msg, privateKey) +} + +// -- Ed25519 verification (Crypto.hs:1270 verify') + +export function verify(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + try { + return sodium.crypto_sign_verify_detached(sig, msg, publicKey) + } catch { + return false + } +} + +// -- X25519 Diffie-Hellman (Crypto.hs:1280 dh') + +export function dh(publicKey: Uint8Array, privateKey: Uint8Array): Uint8Array { + return sodium.crypto_scalarmult(privateKey, publicKey) +} + +// -- DER encoding for Ed25519 public keys (RFC 8410, SubjectPublicKeyInfo) +// SEQUENCE { SEQUENCE { OID 1.3.101.112 } BIT STRING { 0x00 <32 bytes> } } + +const ED25519_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00, +]) + +const X25519_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x03, 0x21, 0x00, +]) + +export function encodePubKeyEd25519(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(ED25519_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function decodePubKeyEd25519(der: Uint8Array): Uint8Array { + if (der.length !== 44) throw new Error("decodePubKeyEd25519: invalid length") + for (let i = 0; i < ED25519_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED25519_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyEd25519: invalid DER prefix") + } + return der.subarray(12) +} + +export function encodePubKeyX25519(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(X25519_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function decodePubKeyX25519(der: Uint8Array): Uint8Array { + if (der.length !== 44) throw new Error("decodePubKeyX25519: invalid length") + for (let i = 0; i < X25519_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== X25519_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyX25519: invalid DER prefix") + } + return der.subarray(12) +} + +// -- DER encoding for Ed448 public keys (RFC 8410, SubjectPublicKeyInfo) +// SEQUENCE { SEQUENCE { OID 1.3.101.113 } BIT STRING { 0x00 <57 bytes> } } + +const ED448_PUBKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x43, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x71, 0x03, 0x3a, 0x00, +]) + +export function encodePubKeyEd448(rawPubKey: Uint8Array): Uint8Array { + return concatBytes(ED448_PUBKEY_DER_PREFIX, rawPubKey) +} + +export function decodePubKeyEd448(der: Uint8Array): Uint8Array { + if (der.length !== 69) throw new Error("decodePubKeyEd448: invalid length") + for (let i = 0; i < ED448_PUBKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED448_PUBKEY_DER_PREFIX[i]) throw new Error("decodePubKeyEd448: invalid DER prefix") + } + return der.subarray(12) +} + +// -- Ed448 verification via @noble/curves (Crypto.hs:1270 verify') + +export function verifyEd448(publicKey: Uint8Array, sig: Uint8Array, msg: Uint8Array): boolean { + try { + return ed448.verify(sig, msg, publicKey) + } catch { + return false + } +} + +// -- DER encoding for private keys (PKCS8 OneAsymmetricKey, RFC 8410) +// SEQUENCE { INTEGER 0, SEQUENCE { OID }, OCTET STRING { OCTET STRING { <32 bytes> } } } + +const ED25519_PRIVKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x04, 0x22, 0x04, 0x20, +]) + +const 
X25519_PRIVKEY_DER_PREFIX = new Uint8Array([ + 0x30, 0x2e, 0x02, 0x01, 0x00, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x6e, 0x04, 0x22, 0x04, 0x20, +]) + +export function encodePrivKeyEd25519(privateKey: Uint8Array): Uint8Array { + // privateKey is 64 bytes (libsodium: seed || pubkey), seed is first 32 bytes + const seed = privateKey.subarray(0, 32) + return concatBytes(ED25519_PRIVKEY_DER_PREFIX, seed) +} + +export function decodePrivKeyEd25519(der: Uint8Array): Uint8Array { + if (der.length !== 48) throw new Error("decodePrivKeyEd25519: invalid length") + for (let i = 0; i < ED25519_PRIVKEY_DER_PREFIX.length; i++) { + if (der[i] !== ED25519_PRIVKEY_DER_PREFIX[i]) throw new Error("decodePrivKeyEd25519: invalid DER prefix") + } + // Returns 32-byte seed; call ed25519KeyPairFromSeed to get full keypair. + return der.subarray(16) +} + +export function encodePrivKeyX25519(privateKey: Uint8Array): Uint8Array { + return concatBytes(X25519_PRIVKEY_DER_PREFIX, privateKey) +} + +export function decodePrivKeyX25519(der: Uint8Array): Uint8Array { + if (der.length !== 48) throw new Error("decodePrivKeyX25519: invalid length") + for (let i = 0; i < X25519_PRIVKEY_DER_PREFIX.length; i++) { + if (der[i] !== X25519_PRIVKEY_DER_PREFIX[i]) throw new Error("decodePrivKeyX25519: invalid DER prefix") + } + return der.subarray(16) +} + +// -- KeyHash: SHA-256 of DER-encoded public key (Crypto.hs:981) + +export function keyHash(derPubKey: Uint8Array): Uint8Array { + return sha256(derPubKey) +} diff --git a/xftp-web/src/crypto/padding.ts b/xftp-web/src/crypto/padding.ts new file mode 100644 index 0000000000..29c6790954 --- /dev/null +++ b/xftp-web/src/crypto/padding.ts @@ -0,0 +1,61 @@ +// Block padding matching Simplex.Messaging.Crypto (strict) and Simplex.Messaging.Crypto.Lazy. +// Strict: 2-byte BE length prefix + message + '#' fill. +// Lazy: 8-byte Int64 length prefix + message + '#' fill. 
+ +import {encodeWord16, decodeWord16, encodeInt64, decodeInt64, Decoder} from "../protocol/encoding.js" + +const HASH = 0x23 // '#' + +// -- Strict pad/unPad (protocol messages) — Crypto.hs:1077 + +export function pad(msg: Uint8Array, paddedLen: number): Uint8Array { + const len = msg.length + if (len > 65535) throw new Error("pad: message too large for Word16 length") + const fillLen = paddedLen - len - 2 + if (fillLen < 0) throw new Error("pad: message exceeds padded size") + const result = new Uint8Array(paddedLen) + const lenBytes = encodeWord16(len) + result.set(lenBytes, 0) + result.set(msg, 2) + result.fill(HASH, 2 + len) + return result +} + +export function unPad(padded: Uint8Array): Uint8Array { + if (padded.length < 2) throw new Error("unPad: input too short") + const d = new Decoder(padded) + const len = decodeWord16(d) + if (padded.length - 2 < len) throw new Error("unPad: invalid length") + return padded.subarray(2, 2 + len) +} + +// -- Lazy pad/unPad (file encryption) — Crypto/Lazy.hs:70 + +export function padLazy(msg: Uint8Array, msgLen: bigint, padLen: bigint): Uint8Array { + const fillLen = padLen - msgLen - 8n + if (fillLen < 0n) throw new Error("padLazy: message exceeds padded size") + const totalLen = Number(padLen) + const result = new Uint8Array(totalLen) + const lenBytes = encodeInt64(msgLen) + result.set(lenBytes, 0) + result.set(msg.subarray(0, Number(msgLen)), 8) + result.fill(HASH, 8 + Number(msgLen)) + return result +} + +export function unPadLazy(padded: Uint8Array): Uint8Array { + return splitLen(padded).content +} + +// splitLen: extract 8-byte Int64 length and content — Crypto/Lazy.hs:96 +// Does not fail if content is shorter than declared length (for chunked decryption). +export function splitLen(data: Uint8Array): {len: bigint; content: Uint8Array} { + if (data.length < 8) throw new Error("splitLen: input too short") + const d = new Decoder(data) + const len = decodeInt64(d) + if (len < 0n) throw new Error("splitLen: negative length") + const numLen = Number(len) + const available = data.length - 8 + const takeLen = Math.min(numLen, available) + return {len, content: data.subarray(8, 8 + takeLen)} +} diff --git a/xftp-web/src/crypto/secretbox.ts b/xftp-web/src/crypto/secretbox.ts new file mode 100644 index 0000000000..48ca94da00 --- /dev/null +++ b/xftp-web/src/crypto/secretbox.ts @@ -0,0 +1,219 @@ +// Streaming XSalsa20-Poly1305 — Simplex.Messaging.Crypto / Crypto.Lazy +// +// Libsodium-wrappers-sumo does not expose crypto_stream_xsalsa20_xor_ic, +// so the Salsa20/20 stream cipher core is implemented here. +// HSalsa20 uses libsodium's crypto_core_hsalsa20. +// Poly1305 uses libsodium's streaming crypto_onetimeauth_* API. 
+ +import sodium, {StateAddress} from "libsodium-wrappers-sumo" +import {concatBytes} from "../protocol/encoding.js" +import {pad, unPad, padLazy, unPadLazy} from "./padding.js" + +// crypto_core_hsalsa20 exists at runtime but is missing from @types/libsodium-wrappers-sumo +const _sodium = sodium as unknown as { + crypto_core_hsalsa20(input: Uint8Array, key: Uint8Array, constant?: Uint8Array): Uint8Array +} & typeof sodium + +// ── Salsa20/20 stream cipher core ─────────────────────────────── + +function readU32LE(buf: Uint8Array, off: number): number { + return ((buf[off] | (buf[off + 1] << 8) | (buf[off + 2] << 16) | (buf[off + 3] << 24)) >>> 0) +} + +function writeU32LE(buf: Uint8Array, off: number, val: number): void { + buf[off] = val & 0xff + buf[off + 1] = (val >>> 8) & 0xff + buf[off + 2] = (val >>> 16) & 0xff + buf[off + 3] = (val >>> 24) & 0xff +} + +function rotl32(v: number, n: number): number { + return ((v << n) | (v >>> (32 - n))) >>> 0 +} + +const SIGMA_0 = 0x61707865 +const SIGMA_1 = 0x3320646e +const SIGMA_2 = 0x79622d32 +const SIGMA_3 = 0x6b206574 + +function salsa20Block(key: Uint8Array, nonce8: Uint8Array, counter: number): Uint8Array { + const k0 = readU32LE(key, 0), k1 = readU32LE(key, 4) + const k2 = readU32LE(key, 8), k3 = readU32LE(key, 12) + const k4 = readU32LE(key, 16), k5 = readU32LE(key, 20) + const k6 = readU32LE(key, 24), k7 = readU32LE(key, 28) + const n0 = readU32LE(nonce8, 0), n1 = readU32LE(nonce8, 4) + + const s0 = SIGMA_0, s1 = k0, s2 = k1, s3 = k2 + const s4 = k3, s5 = SIGMA_1, s6 = n0, s7 = n1 + const s8 = counter >>> 0, s9 = 0, s10 = SIGMA_2, s11 = k4 + const s12 = k5, s13 = k6, s14 = k7, s15 = SIGMA_3 + + let x0 = s0, x1 = s1, x2 = s2, x3 = s3 + let x4 = s4, x5 = s5, x6 = s6, x7 = s7 + let x8 = s8, x9 = s9, x10 = s10, x11 = s11 + let x12 = s12, x13 = s13, x14 = s14, x15 = s15 + + for (let i = 0; i < 10; i++) { + // Column round + x4 ^= rotl32((x0 + x12) >>> 0, 7); x8 ^= rotl32((x4 + x0) >>> 0, 9) + x12 ^= rotl32((x8 + x4) >>> 0, 13); x0 ^= rotl32((x12 + x8) >>> 0, 18) + x9 ^= rotl32((x5 + x1) >>> 0, 7); x13 ^= rotl32((x9 + x5) >>> 0, 9) + x1 ^= rotl32((x13 + x9) >>> 0, 13); x5 ^= rotl32((x1 + x13) >>> 0, 18) + x14 ^= rotl32((x10 + x6) >>> 0, 7); x2 ^= rotl32((x14 + x10) >>> 0, 9) + x6 ^= rotl32((x2 + x14) >>> 0, 13); x10 ^= rotl32((x6 + x2) >>> 0, 18) + x3 ^= rotl32((x15 + x11) >>> 0, 7); x7 ^= rotl32((x3 + x15) >>> 0, 9) + x11 ^= rotl32((x7 + x3) >>> 0, 13); x15 ^= rotl32((x11 + x7) >>> 0, 18) + // Row round + x1 ^= rotl32((x0 + x3) >>> 0, 7); x2 ^= rotl32((x1 + x0) >>> 0, 9) + x3 ^= rotl32((x2 + x1) >>> 0, 13); x0 ^= rotl32((x3 + x2) >>> 0, 18) + x6 ^= rotl32((x5 + x4) >>> 0, 7); x7 ^= rotl32((x6 + x5) >>> 0, 9) + x4 ^= rotl32((x7 + x6) >>> 0, 13); x5 ^= rotl32((x4 + x7) >>> 0, 18) + x11 ^= rotl32((x10 + x9) >>> 0, 7); x8 ^= rotl32((x11 + x10) >>> 0, 9) + x9 ^= rotl32((x8 + x11) >>> 0, 13); x10 ^= rotl32((x9 + x8) >>> 0, 18) + x12 ^= rotl32((x15 + x14) >>> 0, 7); x13 ^= rotl32((x12 + x15) >>> 0, 9) + x14 ^= rotl32((x13 + x12) >>> 0, 13); x15 ^= rotl32((x14 + x13) >>> 0, 18) + } + + const out = new Uint8Array(64) + writeU32LE(out, 0, (x0 + s0) >>> 0); writeU32LE(out, 4, (x1 + s1) >>> 0) + writeU32LE(out, 8, (x2 + s2) >>> 0); writeU32LE(out, 12, (x3 + s3) >>> 0) + writeU32LE(out, 16, (x4 + s4) >>> 0); writeU32LE(out, 20, (x5 + s5) >>> 0) + writeU32LE(out, 24, (x6 + s6) >>> 0); writeU32LE(out, 28, (x7 + s7) >>> 0) + writeU32LE(out, 32, (x8 + s8) >>> 0); writeU32LE(out, 36, (x9 + s9) >>> 0) + writeU32LE(out, 40, (x10 + s10) >>> 0); writeU32LE(out, 
44, (x11 + s11) >>> 0) + writeU32LE(out, 48, (x12 + s12) >>> 0); writeU32LE(out, 52, (x13 + s13) >>> 0) + writeU32LE(out, 56, (x14 + s14) >>> 0); writeU32LE(out, 60, (x15 + s15) >>> 0) + return out +} + +// ── Streaming state ───────────────────────────────────────────── + +export interface SbState { + _subkey: Uint8Array + _nonce8: Uint8Array + _counter: number + _ksBuf: Uint8Array + _ksOff: number + _authState: StateAddress +} + +export function sbInit(key: Uint8Array, nonce: Uint8Array): SbState { + // Double HSalsa20 cascade matching Haskell cryptonite XSalsa20 (Crypto.hs:xSalsa20): + // subkey1 = HSalsa20(key, zeros16) + // subkey2 = HSalsa20(subkey1, nonce[0:16]) + // keystream = Salsa20(subkey2, nonce[16:24]) + const zeros16 = new Uint8Array(16) + const subkey1 = _sodium.crypto_core_hsalsa20(zeros16, key) + const subkey = _sodium.crypto_core_hsalsa20(nonce.subarray(0, 16), subkey1) + const nonce8 = new Uint8Array(nonce.subarray(16, 24)) + const block0 = salsa20Block(subkey, nonce8, 0) + const poly1305Key = block0.subarray(0, 32) + const ksBuf = new Uint8Array(block0.subarray(32)) + const authState = sodium.crypto_onetimeauth_init(poly1305Key) + return {_subkey: subkey, _nonce8: nonce8, _counter: 1, _ksBuf: ksBuf, _ksOff: 0, _authState: authState} +} + +export function cbInit(dhSecret: Uint8Array, nonce: Uint8Array): SbState { + return sbInit(dhSecret, nonce) +} + +export function sbEncryptChunk(state: SbState, chunk: Uint8Array): Uint8Array { + const cipher = xorKeystream(state, chunk) + sodium.crypto_onetimeauth_update(state._authState, cipher) + return cipher +} + +export function sbDecryptChunk(state: SbState, chunk: Uint8Array): Uint8Array { + sodium.crypto_onetimeauth_update(state._authState, chunk) + return xorKeystream(state, chunk) +} + +export function sbAuth(state: SbState): Uint8Array { + return sodium.crypto_onetimeauth_final(state._authState) +} + +// ── High-level: tail tag (tag appended) ───────────────────────── + +export function sbEncryptTailTag( + key: Uint8Array, nonce: Uint8Array, + data: Uint8Array, len: bigint, padLen: bigint +): Uint8Array { + const padded = padLazy(data, len, padLen) + const state = sbInit(key, nonce) + const cipher = sbEncryptChunk(state, padded) + const tag = sbAuth(state) + return concatBytes(cipher, tag) +} + +export function sbDecryptTailTag( + key: Uint8Array, nonce: Uint8Array, + paddedLen: bigint, data: Uint8Array +): {valid: boolean; content: Uint8Array} { + const pLen = Number(paddedLen) + const cipher = data.subarray(0, pLen) + const providedTag = data.subarray(pLen) + const state = sbInit(key, nonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + const valid = providedTag.length === 16 && constantTimeEqual(providedTag, computedTag) + const content = unPadLazy(plaintext) + return {valid, content} +} + +// ── Tag-prepended secretbox (Haskell Crypto.hs:cryptoBox) ─────── + +export function cryptoBox(key: Uint8Array, nonce: Uint8Array, msg: Uint8Array): Uint8Array { + const state = sbInit(key, nonce) + const cipher = sbEncryptChunk(state, msg) + const tag = sbAuth(state) + return concatBytes(tag, cipher) +} + +export function cbEncrypt( + dhSecret: Uint8Array, nonce: Uint8Array, + msg: Uint8Array, padLen: number +): Uint8Array { + return cryptoBox(dhSecret, nonce, pad(msg, padLen)) +} + +export function cbDecrypt( + dhSecret: Uint8Array, nonce: Uint8Array, + packet: Uint8Array +): Uint8Array { + const tag = packet.subarray(0, 16) + const cipher = packet.subarray(16) + const state = 
sbInit(dhSecret, nonce)
+  const plaintext = sbDecryptChunk(state, cipher)
+  const computedTag = sbAuth(state)
+  if (!constantTimeEqual(tag, computedTag)) throw new Error("secretbox: authentication failed")
+  return unPad(plaintext)
+}
+
+// ── Internal ────────────────────────────────────────────────────
+
+function xorKeystream(state: SbState, data: Uint8Array): Uint8Array {
+  const result = new Uint8Array(data.length)
+  let off = 0
+  while (off < data.length) {
+    if (state._ksOff >= state._ksBuf.length) {
+      state._ksBuf = salsa20Block(state._subkey, state._nonce8, state._counter++)
+      state._ksOff = 0
+    }
+    const available = state._ksBuf.length - state._ksOff
+    const needed = data.length - off
+    const n = Math.min(available, needed)
+    for (let i = 0; i < n; i++) {
+      result[off + i] = data[off + i] ^ state._ksBuf[state._ksOff + i]
+    }
+    state._ksOff += n
+    off += n
+  }
+  return result
+}
+
+function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean {
+  if (a.length !== b.length) return false
+  let diff = 0
+  for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i]
+  return diff === 0
+}
diff --git a/xftp-web/src/download.ts b/xftp-web/src/download.ts
new file mode 100644
index 0000000000..35c4395b1d
--- /dev/null
+++ b/xftp-web/src/download.ts
@@ -0,0 +1,75 @@
+// XFTP download pipeline — integration of protocol + crypto layers.
+//
+// Ties together: DH key exchange (keys), transport decryption (client),
+// file-level decryption (file), chunk sizing (chunks), digest verification.
+//
+// Usage:
+//   1. Parse FileDescription from YAML (description.ts)
+//   2. For each chunk replica:
+//      a. generateX25519KeyPair() → ephemeral DH keypair
+//      b. encodeFGET(dhPub) → FGET command
+//      c. encodeAuthTransmission(...) → padded block (send to server)
+//      d. decodeTransmission(sessId, responseBlock) → raw response
+//      e. decodeResponse(raw) → FRFile { rcvDhKey, nonce }
+//      f. processFileResponse(rcvPrivKey, rcvDhKey) → dhSecret
+//      g. decryptReceivedChunk(dhSecret, nonce, encData, digest) → plaintext
+//   3. processDownloadedFile(fd, plaintextChunks) → { header, content }
+
+import {dh} from "./crypto/keys.js"
+import {sha256} from "./crypto/digest.js"
+import {decryptChunks, type FileHeader} from "./crypto/file.js"
+import {decryptTransportChunk} from "./protocol/client.js"
+import type {FileDescription} from "./protocol/description.js"
+
+// ── Process FRFile response ─────────────────────────────────────
+
+// Derive transport decryption secret from FRFile response parameters.
+// Uses DH(serverDhKey, recipientPrivKey) to produce shared secret.
+export function processFileResponse(
+  recipientPrivKey: Uint8Array, // Ephemeral X25519 private key (32 bytes)
+  serverDhKey: Uint8Array,      // rcvDhKey from FRFile response (32 bytes)
+): Uint8Array {
+  return dh(serverDhKey, recipientPrivKey)
+}
+
+// ── Decrypt a single received chunk ─────────────────────────────
+
+// Decrypt transport-encrypted chunk data and verify SHA-256 digest.
+// Returns decrypted content or throws on auth tag / digest failure.
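+// Sketch (illustrative; dhSecret from processFileResponse above, nonce from
+// the FRFile response):
+//   const chunk = decryptReceivedChunk(dhSecret, nonce, encBody, chunkDigest)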
+export function decryptReceivedChunk(
+  dhSecret: Uint8Array,
+  cbNonce: Uint8Array,
+  encData: Uint8Array,
+  expectedDigest: Uint8Array | null
+): Uint8Array {
+  const {valid, content} = decryptTransportChunk(dhSecret, cbNonce, encData)
+  if (!valid) throw new Error("transport auth tag verification failed")
+  if (expectedDigest !== null) {
+    const actual = sha256(content)
+    if (!digestEqual(actual, expectedDigest)) {
+      throw new Error("chunk digest mismatch")
+    }
+  }
+  return content
+}
+
+// ── Full download pipeline ──────────────────────────────────────
+
+// Process downloaded file: concatenate transport-decrypted chunks,
+// then file-level decrypt using key/nonce from file description.
+// Returns parsed FileHeader and file content.
+export function processDownloadedFile(
+  fd: FileDescription,
+  plaintextChunks: Uint8Array[]
+): {header: FileHeader, content: Uint8Array} {
+  return decryptChunks(BigInt(fd.size), plaintextChunks, fd.key, fd.nonce)
+}
+
+// ── Internal ────────────────────────────────────────────────────
+
+function digestEqual(a: Uint8Array, b: Uint8Array): boolean {
+  if (a.length !== b.length) return false
+  let diff = 0
+  for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i]
+  return diff === 0
+}
diff --git a/xftp-web/src/protocol/address.ts b/xftp-web/src/protocol/address.ts
new file mode 100644
index 0000000000..a66f23927e
--- /dev/null
+++ b/xftp-web/src/protocol/address.ts
@@ -0,0 +1,54 @@
+// XFTP server address parsing/formatting — Simplex.Messaging.Protocol (ProtocolServer)
+//
+// Parses/formats server address strings of the form:
+//   xftp://<keyHash>@<host>[,<host2>,...][:<port>]
+//
+// KeyHash is base64url-encoded SHA-256 fingerprint of the identity certificate.
+
+import {base64urlEncode} from "./description.js"
+
+export interface XFTPServer {
+  keyHash: Uint8Array // 32-byte SHA-256 fingerprint (decoded from base64url)
+  host: string        // primary hostname
+  port: string        // port number (default "443")
+}
+
+// Decode base64url (RFC 4648 §5) to Uint8Array.
+function base64urlDecode(s: string): Uint8Array {
+  // Convert base64url to standard base64
+  let b64 = s.replace(/-/g, '+').replace(/_/g, '/')
+  // Add padding if needed
+  while (b64.length % 4 !== 0) b64 += '='
+  const bin = atob(b64)
+  const bytes = new Uint8Array(bin.length)
+  for (let i = 0; i < bin.length; i++) bytes[i] = bin.charCodeAt(i)
+  return bytes
+}
+
+// Parse an XFTP server address string.
+// Format: xftp://<keyHash>@<host>[,<host2>,...][:<port>]
+export function parseXFTPServer(address: string): XFTPServer {
+  const m = address.match(/^xftp:\/\/([A-Za-z0-9_-]+={0,2})@(.+)$/)
+  if (!m) throw new Error("parseXFTPServer: invalid address format")
+  const keyHash = base64urlDecode(m[1])
+  if (keyHash.length !== 32) throw new Error("parseXFTPServer: keyHash must be 32 bytes")
+  const hostPart = m[2]
+  // Take the first host (before any comma), then split port from that
+  const firstHost = hostPart.split(',')[0]
+  const colonIdx = firstHost.lastIndexOf(':')
+  let host: string
+  let port: string
+  if (colonIdx > 0) {
+    host = firstHost.substring(0, colonIdx)
+    port = firstHost.substring(colonIdx + 1)
+  } else {
+    host = firstHost
+    port = "443"
+  }
+  return {keyHash, host, port}
+}
+
+// Format an XFTPServer back to its URI string representation.
+export function formatXFTPServer(srv: XFTPServer): string { + return "xftp://" + base64urlEncode(srv.keyHash) + "@" + srv.host + ":" + srv.port +} diff --git a/xftp-web/src/protocol/chunks.ts b/xftp-web/src/protocol/chunks.ts new file mode 100644 index 0000000000..db2bbe7635 --- /dev/null +++ b/xftp-web/src/protocol/chunks.ts @@ -0,0 +1,86 @@ +// XFTP chunk sizing — Simplex.FileTransfer.Chunks + Client +// +// Computes chunk sizes for file uploads, chunk specifications with offsets, +// and per-chunk SHA-256 digests. + +import {kb, mb} from "./description.js" +import {sha256} from "../crypto/digest.js" + +// ── Chunk size constants (Simplex.FileTransfer.Chunks) ────────── + +export const chunkSize0 = kb(64) // 65536 +export const chunkSize1 = kb(256) // 262144 +export const chunkSize2 = mb(1) // 1048576 +export const chunkSize3 = mb(4) // 4194304 + +export const serverChunkSizes = [chunkSize0, chunkSize1, chunkSize2, chunkSize3] + +// ── Size constants ────────────────────────────────────────────── + +export const fileSizeLen = 8 // 64-bit file size prefix (padLazy) +export const authTagSize = 16 // Poly1305 authentication tag + +// ── Chunk sizing (Simplex.FileTransfer.Client.prepareChunkSizes) ─ + +function size34(sz: number): number { + return Math.floor((sz * 3) / 4) +} + +export function prepareChunkSizes(payloadSize: number): number[] { + let smallSize: number, bigSize: number + if (payloadSize > size34(chunkSize3)) { + smallSize = chunkSize2; bigSize = chunkSize3 + } else if (payloadSize > size34(chunkSize2)) { + smallSize = chunkSize1; bigSize = chunkSize2 + } else { + smallSize = chunkSize0; bigSize = chunkSize1 + } + function prepareSizes(size: number): number[] { + if (size === 0) return [] + if (size >= bigSize) { + const n1 = Math.floor(size / bigSize) + const remSz = size % bigSize + return new Array(n1).fill(bigSize).concat(prepareSizes(remSz)) + } + if (size > size34(bigSize)) return [bigSize] + const n2 = Math.floor(size / smallSize) + const remSz2 = size % smallSize + return new Array(remSz2 === 0 ? n2 : n2 + 1).fill(smallSize) + } + return prepareSizes(payloadSize) +} + +// Find the smallest server chunk size that fits the payload. +// Returns null if payload exceeds the largest chunk size. +// Matches Haskell singleChunkSize. +export function singleChunkSize(payloadSize: number): number | null { + for (const sz of serverChunkSizes) { + if (payloadSize <= sz) return sz + } + return null +} + +// ── Chunk specs ───────────────────────────────────────────────── + +export interface ChunkSpec { + chunkOffset: number + chunkSize: number +} + +// Generate chunk specifications with byte offsets. +// Matches Haskell prepareChunkSpecs (without filePath). +export function prepareChunkSpecs(chunkSizes: number[]): ChunkSpec[] { + const specs: ChunkSpec[] = [] + let offset = 0 + for (const size of chunkSizes) { + specs.push({chunkOffset: offset, chunkSize: size}) + offset += size + } + return specs +} + +// ── Chunk digest ──────────────────────────────────────────────── + +export function getChunkDigest(chunk: Uint8Array): Uint8Array { + return sha256(chunk) +} diff --git a/xftp-web/src/protocol/client.ts b/xftp-web/src/protocol/client.ts new file mode 100644 index 0000000000..e5a3c2d700 --- /dev/null +++ b/xftp-web/src/protocol/client.ts @@ -0,0 +1,95 @@ +// XFTP client protocol operations — Simplex.FileTransfer.Client + Crypto +// +// CbAuthenticator-based command authentication and transport-level +// chunk encryption/decryption for XFTP downloads. 
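+//
+// Sketch of command authentication (names illustrative; in this protocol the
+// nonce is typically the transmission corrId):
+//
+//   const auth = cbAuthenticate(serverSessKey, clientPrivKey, nonce, signedBytes)
+//   // auth = 16-byte tag ++ crypto_box(sha512(signedBytes)), 80 bytes total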
+ +import {concatBytes} from "./encoding.js" +import {dh} from "../crypto/keys.js" +import {sha512} from "../crypto/digest.js" +import { + cbInit, sbEncryptChunk, sbDecryptChunk, sbAuth, cryptoBox +} from "../crypto/secretbox.js" + +// ── Constants ─────────────────────────────────────────────────── + +export const cbAuthenticatorSize = 80 // SHA512 (64) + authTag (16) + +// ── CbAuthenticator (Crypto.hs:cbAuthenticate) ───────────────── + +// Create crypto_box authenticator for a message. +// Encrypts sha512(msg) with NaCl crypto_box using DH(peerPubKey, ownPrivKey). +// Returns 80 bytes (16-byte tag prepended + 64-byte encrypted hash). +export function cbAuthenticate( + peerPubKey: Uint8Array, + ownPrivKey: Uint8Array, + nonce: Uint8Array, + msg: Uint8Array +): Uint8Array { + const dhSecret = dh(peerPubKey, ownPrivKey) + const hash = sha512(msg) + return cryptoBox(dhSecret, nonce, hash) +} + +// Verify crypto_box authenticator for a message. +// Decrypts authenticator with DH(peerPubKey, ownPrivKey), checks against sha512(msg). +export function cbVerify( + peerPubKey: Uint8Array, + ownPrivKey: Uint8Array, + nonce: Uint8Array, + authenticator: Uint8Array, + msg: Uint8Array +): boolean { + if (authenticator.length !== cbAuthenticatorSize) return false + const dhSecret = dh(peerPubKey, ownPrivKey) + const tag = authenticator.subarray(0, 16) + const cipher = authenticator.subarray(16) + const state = cbInit(dhSecret, nonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + if (!constantTimeEqual(tag, computedTag)) return false + const expectedHash = sha512(msg) + return constantTimeEqual(plaintext, expectedHash) +} + +// ── Transport-level chunk encryption/decryption ───────────────── + +// Encrypt a chunk for transport (tag-appended format). +// Matches sendEncFile in FileTransfer.Transport: +// ciphertext streamed via sbEncryptChunk, then 16-byte auth tag appended. +export function encryptTransportChunk( + dhSecret: Uint8Array, + cbNonce: Uint8Array, + plainData: Uint8Array +): Uint8Array { + const state = cbInit(dhSecret, cbNonce) + const cipher = sbEncryptChunk(state, plainData) + const tag = sbAuth(state) + return concatBytes(cipher, tag) +} + +// Decrypt a transport-encrypted chunk (tag-appended format). +// Matches receiveEncFile / receiveSbFile in FileTransfer.Transport: +// ciphertext decrypted via sbDecryptChunk, then 16-byte auth tag verified. 
+export function decryptTransportChunk( + dhSecret: Uint8Array, + cbNonce: Uint8Array, + encData: Uint8Array +): {valid: boolean, content: Uint8Array} { + if (encData.length < 16) return {valid: false, content: new Uint8Array(0)} + const cipher = encData.subarray(0, encData.length - 16) + const providedTag = encData.subarray(encData.length - 16) + const state = cbInit(dhSecret, cbNonce) + const plaintext = sbDecryptChunk(state, cipher) + const computedTag = sbAuth(state) + const valid = constantTimeEqual(providedTag, computedTag) + return {valid, content: plaintext} +} + +// ── Internal ──────────────────────────────────────────────────── + +function constantTimeEqual(a: Uint8Array, b: Uint8Array): boolean { + if (a.length !== b.length) return false + let diff = 0 + for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i] + return diff === 0 +} diff --git a/xftp-web/src/protocol/commands.ts b/xftp-web/src/protocol/commands.ts new file mode 100644 index 0000000000..9f5d56d4f6 --- /dev/null +++ b/xftp-web/src/protocol/commands.ts @@ -0,0 +1,158 @@ +// Protocol commands and responses — Simplex.FileTransfer.Protocol +// +// Commands (client -> server): FNEW, FADD, FPUT, FDEL, FGET, FACK, PING +// Responses (server -> client): SIDS, RIDS, FILE, OK, ERR, PONG + +import { + Decoder, concatBytes, + encodeBytes, decodeBytes, + encodeWord32, + encodeNonEmpty, decodeNonEmpty, + encodeMaybe +} from "./encoding.js" +import {decodePubKeyX25519} from "../crypto/keys.js" + +// ── Types ───────────────────────────────────────────────────────── + +export interface FileInfo { + sndKey: Uint8Array // DER-encoded Ed25519 public key (44 bytes) + size: number // Word32 + digest: Uint8Array // SHA-256 digest (32 bytes) +} + +export type CommandError = "UNKNOWN" | "SYNTAX" | "PROHIBITED" | "NO_AUTH" | "HAS_AUTH" | "NO_ENTITY" + +export type XFTPErrorType = + | {type: "BLOCK"} | {type: "SESSION"} | {type: "HANDSHAKE"} + | {type: "CMD", cmdErr: CommandError} + | {type: "AUTH"} + | {type: "BLOCKED", blockInfo: string} + | {type: "SIZE"} | {type: "QUOTA"} | {type: "DIGEST"} | {type: "CRYPTO"} + | {type: "NO_FILE"} | {type: "HAS_FILE"} | {type: "FILE_IO"} + | {type: "TIMEOUT"} | {type: "INTERNAL"} + +export type FileResponse = + | {type: "FRSndIds", senderId: Uint8Array, recipientIds: Uint8Array[]} + | {type: "FRRcvIds", recipientIds: Uint8Array[]} + | {type: "FRFile", rcvDhKey: Uint8Array, nonce: Uint8Array} + | {type: "FROk"} + | {type: "FRErr", err: XFTPErrorType} + | {type: "FRPong"} + +// ── FileInfo encoding ───────────────────────────────────────────── + +// smpEncode FileInfo {sndKey, size, digest} = smpEncode (sndKey, size, digest) +export function encodeFileInfo(fi: FileInfo): Uint8Array { + return concatBytes(encodeBytes(fi.sndKey), encodeWord32(fi.size), encodeBytes(fi.digest)) +} + +// ── Command encoding (encodeProtocol) ───────────────────────────── + +const SPACE = new Uint8Array([0x20]) + +function ascii(s: string): Uint8Array { + const buf = new Uint8Array(s.length) + for (let i = 0; i < s.length; i++) buf[i] = s.charCodeAt(i) + return buf +} + +export function encodeFNEW(file: FileInfo, rcvKeys: Uint8Array[], auth: Uint8Array | null): Uint8Array { + return concatBytes( + ascii("FNEW"), SPACE, + encodeFileInfo(file), + encodeNonEmpty(encodeBytes, rcvKeys), + encodeMaybe(encodeBytes, auth) + ) +} + +export function encodeFADD(rcvKeys: Uint8Array[]): Uint8Array { + return concatBytes(ascii("FADD"), SPACE, encodeNonEmpty(encodeBytes, rcvKeys)) +} + +export function encodeFPUT(): Uint8Array { return 
ascii("FPUT") } + +export function encodeFDEL(): Uint8Array { return ascii("FDEL") } + +export function encodeFGET(rcvDhKey: Uint8Array): Uint8Array { + return concatBytes(ascii("FGET"), SPACE, encodeBytes(rcvDhKey)) +} + +export function encodeFACK(): Uint8Array { return ascii("FACK") } + +export function encodePING(): Uint8Array { return ascii("PING") } + +// ── Response decoding ───────────────────────────────────────────── + +function readTag(d: Decoder): string { + const start = d.offset() + while (d.remaining() > 0) { + if (d.buf[d.offset()] === 0x20 || d.buf[d.offset()] === 0x0a) break + d.anyByte() + } + let s = "" + for (let i = start; i < d.offset(); i++) s += String.fromCharCode(d.buf[i]) + return s +} + +function readSpace(d: Decoder): void { + if (d.anyByte() !== 0x20) throw new Error("expected space") +} + +function decodeCommandError(s: string): CommandError { + if (s === "UNKNOWN" || s === "SYNTAX" || s === "PROHIBITED" || s === "NO_AUTH" || s === "HAS_AUTH" || s === "NO_ENTITY") return s + if (s === "NO_QUEUE") return "NO_ENTITY" + throw new Error("bad CommandError: " + s) +} + +export function decodeXFTPError(d: Decoder): XFTPErrorType { + const s = readTag(d) + switch (s) { + case "BLOCK": return {type: "BLOCK"} + case "SESSION": return {type: "SESSION"} + case "HANDSHAKE": return {type: "HANDSHAKE"} + case "CMD": { readSpace(d); return {type: "CMD", cmdErr: decodeCommandError(readTag(d))} } + case "AUTH": return {type: "AUTH"} + case "BLOCKED": { + readSpace(d) + const rest = d.takeAll() + let info = "" + for (let i = 0; i < rest.length; i++) info += String.fromCharCode(rest[i]) + return {type: "BLOCKED", blockInfo: info} + } + case "SIZE": return {type: "SIZE"} + case "QUOTA": return {type: "QUOTA"} + case "DIGEST": return {type: "DIGEST"} + case "CRYPTO": return {type: "CRYPTO"} + case "NO_FILE": return {type: "NO_FILE"} + case "HAS_FILE": return {type: "HAS_FILE"} + case "FILE_IO": return {type: "FILE_IO"} + case "TIMEOUT": return {type: "TIMEOUT"} + case "INTERNAL": return {type: "INTERNAL"} + default: throw new Error("bad XFTPErrorType: " + s) + } +} + +export function decodeResponse(data: Uint8Array): FileResponse { + const d = new Decoder(data) + const tagStr = readTag(d) + switch (tagStr) { + case "SIDS": { + readSpace(d) + return {type: "FRSndIds", senderId: decodeBytes(d), recipientIds: decodeNonEmpty(decodeBytes, d)} + } + case "RIDS": { + readSpace(d) + return {type: "FRRcvIds", recipientIds: decodeNonEmpty(decodeBytes, d)} + } + case "FILE": { + readSpace(d) + const rcvDhKey = decodePubKeyX25519(decodeBytes(d)) + const nonce = d.take(24) + return {type: "FRFile", rcvDhKey, nonce} + } + case "OK": return {type: "FROk"} + case "ERR": { readSpace(d); return {type: "FRErr", err: decodeXFTPError(d)} } + case "PONG": return {type: "FRPong"} + default: throw new Error("unknown response: " + tagStr) + } +} diff --git a/xftp-web/src/protocol/description.ts b/xftp-web/src/protocol/description.ts new file mode 100644 index 0000000000..99d8213c54 --- /dev/null +++ b/xftp-web/src/protocol/description.ts @@ -0,0 +1,363 @@ +// XFTP file description encoding/decoding — Simplex.FileTransfer.Description +// +// Handles YAML-encoded file descriptions matching Haskell Data.Yaml output format. +// Base64url encoding matches Haskell Data.ByteString.Base64.URL.encode (with padding). 
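+//
+// Shape produced by encodeFileDescription below (values abbreviated and
+// purely illustrative):
+//
+//   chunkSize: 64kb
+//   digest: YWJj...
+//   key: a2V5...
+//   nonce: bm9uY2U...
+//   party: recipient
+//   replicas:
+//   - chunks:
+//     - 1:cmlk...:cmtleQ...:ZGlnZXN0...
+//     server: xftp://fingerprint...@example.com
+//   size: 64kb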
+ +// ── Base64url (RFC 4648 §5) with '=' padding ─────────────────── + +const B64URL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" +const B64_DECODE = new Uint8Array(128) +B64_DECODE.fill(0xff) +for (let i = 0; i < 64; i++) B64_DECODE[B64URL.charCodeAt(i)] = i + +export function base64urlEncode(data: Uint8Array): string { + let result = "" + const len = data.length + let i = 0 + for (; i + 2 < len; i += 3) { + const b0 = data[i], b1 = data[i + 1], b2 = data[i + 2] + result += B64URL[b0 >>> 2] + result += B64URL[((b0 & 3) << 4) | (b1 >>> 4)] + result += B64URL[((b1 & 15) << 2) | (b2 >>> 6)] + result += B64URL[b2 & 63] + } + if (i < len) { + const b0 = data[i] + result += B64URL[b0 >>> 2] + if (i + 1 < len) { + const b1 = data[i + 1] + result += B64URL[((b0 & 3) << 4) | (b1 >>> 4)] + result += B64URL[(b1 & 15) << 2] + result += "=" + } else { + result += B64URL[(b0 & 3) << 4] + result += "==" + } + } + return result +} + +export function base64urlDecode(s: string): Uint8Array { + let end = s.length + while (end > 0 && s.charCodeAt(end - 1) === 0x3d) end-- // strip '=' + const n = end + const out = new Uint8Array((n * 3) >>> 2) + let j = 0, i = 0 + for (; i + 3 < n; i += 4) { + const a = B64_DECODE[s.charCodeAt(i)], b = B64_DECODE[s.charCodeAt(i + 1)] + const c = B64_DECODE[s.charCodeAt(i + 2)], d = B64_DECODE[s.charCodeAt(i + 3)] + out[j++] = (a << 2) | (b >>> 4) + out[j++] = ((b & 15) << 4) | (c >>> 2) + out[j++] = ((c & 3) << 6) | d + } + if (n - i >= 2) { + const a = B64_DECODE[s.charCodeAt(i)], b = B64_DECODE[s.charCodeAt(i + 1)] + out[j++] = (a << 2) | (b >>> 4) + if (n - i >= 3) { + const c = B64_DECODE[s.charCodeAt(i + 2)] + out[j++] = ((b & 15) << 4) | (c >>> 2) + } + } + return out +} + +// ── FileSize encoding/decoding ────────────────────────────────── + +export const kb = (n: number): number => n * 1024 +export const mb = (n: number): number => n * 1048576 +export const gb = (n: number): number => n * 1073741824 + +export function encodeFileSize(bytes: number): string { + const ks = Math.floor(bytes / 1024) + if (bytes % 1024 !== 0) return String(bytes) + const ms = Math.floor(ks / 1024) + if (ks % 1024 !== 0) return ks + "kb" + const gs = Math.floor(ms / 1024) + if (ms % 1024 !== 0) return ms + "mb" + return gs + "gb" +} + +export function decodeFileSize(s: string): number { + if (s.endsWith("gb")) return parseInt(s) * 1073741824 + if (s.endsWith("mb")) return parseInt(s) * 1048576 + if (s.endsWith("kb")) return parseInt(s) * 1024 + return parseInt(s) +} + +// ── Types ─────────────────────────────────────────────────────── + +export type FileParty = "recipient" | "sender" + +export interface FileDescription { + party: FileParty + size: number // total file size in bytes + digest: Uint8Array // SHA-512 file digest + key: Uint8Array // SbKey (32 bytes) + nonce: Uint8Array // CbNonce (24 bytes) + chunkSize: number // default chunk size in bytes + chunks: FileChunk[] + redirect: RedirectFileInfo | null +} + +export interface RedirectFileInfo { + size: number + digest: Uint8Array +} + +export interface FileChunk { + chunkNo: number + chunkSize: number + digest: Uint8Array + replicas: FileChunkReplica[] +} + +export interface FileChunkReplica { + server: string // XFTPServer URI (e.g. 
"xftp://abc=@example.com") + replicaId: Uint8Array + replicaKey: Uint8Array // DER-encoded private key +} + +// ── Internal: flat server replica ─────────────────────────────── + +interface FileServerReplica { + chunkNo: number + server: string + replicaId: Uint8Array + replicaKey: Uint8Array + digest: Uint8Array | null + chunkSize: number | null +} + +// ── Server replica colon-separated format ─────────────────────── + +function encodeServerReplica(r: FileServerReplica): string { + let s = r.chunkNo + ":" + base64urlEncode(r.replicaId) + ":" + base64urlEncode(r.replicaKey) + if (r.digest !== null) s += ":" + base64urlEncode(r.digest) + if (r.chunkSize !== null) s += ":" + encodeFileSize(r.chunkSize) + return s +} + +function decodeServerReplica(server: string, s: string): FileServerReplica { + const parts = s.split(":") + if (parts.length < 3) throw new Error("invalid server replica: " + s) + return { + chunkNo: parseInt(parts[0]), + server, + replicaId: base64urlDecode(parts[1]), + replicaKey: base64urlDecode(parts[2]), + digest: parts.length >= 4 ? base64urlDecode(parts[3]) : null, + chunkSize: parts.length >= 5 ? decodeFileSize(parts[4]) : null + } +} + +// ── Unfold chunks to flat replicas ────────────────────────────── + +function unfoldChunksToReplicas(defChunkSize: number, chunks: FileChunk[]): FileServerReplica[] { + const result: FileServerReplica[] = [] + for (const c of chunks) { + c.replicas.forEach((r, idx) => { + result.push({ + chunkNo: c.chunkNo, + server: r.server, + replicaId: r.replicaId, + replicaKey: r.replicaKey, + digest: idx === 0 ? c.digest : null, + chunkSize: c.chunkSize !== defChunkSize && idx === 0 ? c.chunkSize : null + }) + }) + } + return result +} + +// ── Group replicas by server (for YAML encoding) ──────────────── + +function encodeFileReplicas( + defChunkSize: number, chunks: FileChunk[] +): {server: string, chunks: string[]}[] { + const flat = unfoldChunksToReplicas(defChunkSize, chunks) + // Sort by server URI string (matches Haskell Ord for ProtocolServer when + // all servers share the same scheme and keyHash — true for typical use). + flat.sort((a, b) => a.server < b.server ? -1 : a.server > b.server ? 
1 : 0)
+  const groups: {server: string, chunks: string[]}[] = []
+  for (const r of flat) {
+    if (groups.length === 0 || groups[groups.length - 1].server !== r.server) {
+      groups.push({server: r.server, chunks: [encodeServerReplica(r)]})
+    } else {
+      groups[groups.length - 1].chunks.push(encodeServerReplica(r))
+    }
+  }
+  return groups
+}
+
+// ── Fold flat replicas back into FileChunks ─────────────────────
+
+function bytesEqual(a: Uint8Array, b: Uint8Array): boolean {
+  if (a.length !== b.length) return false
+  for (let i = 0; i < a.length; i++) if (a[i] !== b[i]) return false
+  return true
+}
+
+function foldReplicasToChunks(defChunkSize: number, replicas: FileServerReplica[]): FileChunk[] {
+  const sizes = new Map<number, number>()
+  const digests = new Map<number, Uint8Array>()
+  for (const r of replicas) {
+    if (r.chunkSize !== null) {
+      const existing = sizes.get(r.chunkNo)
+      if (existing !== undefined && existing !== r.chunkSize)
+        throw new Error("different size in chunk replicas")
+      sizes.set(r.chunkNo, r.chunkSize)
+    }
+    if (r.digest !== null) {
+      const existing = digests.get(r.chunkNo)
+      if (existing !== undefined && !bytesEqual(existing, r.digest))
+        throw new Error("different digest in chunk replicas")
+      digests.set(r.chunkNo, r.digest)
+    }
+  }
+  const chunkMap = new Map<number, FileChunk>()
+  for (const r of replicas) {
+    const existing = chunkMap.get(r.chunkNo)
+    if (existing) {
+      existing.replicas.push({server: r.server, replicaId: r.replicaId, replicaKey: r.replicaKey})
+    } else {
+      const digest = digests.get(r.chunkNo)
+      if (!digest) throw new Error("no digest for chunk")
+      chunkMap.set(r.chunkNo, {
+        chunkNo: r.chunkNo,
+        chunkSize: sizes.get(r.chunkNo) ?? defChunkSize,
+        digest,
+        replicas: [{server: r.server, replicaId: r.replicaId, replicaKey: r.replicaKey}]
+      })
+    }
+  }
+  return Array.from(chunkMap.values()).sort((a, b) => a.chunkNo - b.chunkNo)
+}
+
+// ── YAML encoding (matching Data.Yaml key ordering) ─────────────
+
+export function encodeFileDescription(fd: FileDescription): string {
+  const lines: string[] = []
+  // Top-level keys in alphabetical order (matching Data.Yaml / libyaml)
+  lines.push("chunkSize: " + encodeFileSize(fd.chunkSize))
+  lines.push("digest: " + base64urlEncode(fd.digest))
+  lines.push("key: " + base64urlEncode(fd.key))
+  lines.push("nonce: " + base64urlEncode(fd.nonce))
+  lines.push("party: " + fd.party)
+  if (fd.redirect !== null) {
+    lines.push("redirect:")
+    lines.push("  digest: " + base64urlEncode(fd.redirect.digest))
+    lines.push("  size: " + fd.redirect.size)
+  }
+  const groups = encodeFileReplicas(fd.chunkSize, fd.chunks)
+  lines.push("replicas:")
+  for (const g of groups) {
+    lines.push("- chunks:")
+    for (const c of g.chunks) {
+      lines.push("  - " + c)
+    }
+    lines.push("  server: " + g.server)
+  }
+  lines.push("size: " + encodeFileSize(fd.size))
+  return lines.join("\n") + "\n"
+}
+
+// ── YAML decoding ───────────────────────────────────────────────
+
+export function decodeFileDescription(yaml: string): FileDescription {
+  const lines = yaml.split("\n")
+  const topLevel: Record<string, string> = {}
+  const replicaGroups: {server: string, chunks: string[]}[] = []
+  let redirect: RedirectFileInfo | null = null
+  let i = 0
+  while (i < lines.length) {
+    const line = lines[i]
+    if (line.length === 0) { i++; continue }
+    if (line === "replicas:") {
+      i++
+      while (i < lines.length && lines[i].startsWith("- ")) {
+        const group = {server: "", chunks: [] as string[]}
+        i = parseReplicaItem(lines, i, group)
+        replicaGroups.push(group)
+      }
+    } else if (line === "redirect:") {
+      i++
+      let digestStr = "", sizeStr 
= "" + while (i < lines.length && lines[i].startsWith(" ")) { + const kv = lines[i].substring(2) + const ci = kv.indexOf(": ") + if (ci >= 0) { + const k = kv.substring(0, ci), v = kv.substring(ci + 2) + if (k === "digest") digestStr = v + if (k === "size") sizeStr = v + } + i++ + } + redirect = {size: parseInt(sizeStr), digest: base64urlDecode(digestStr)} + } else { + const ci = line.indexOf(": ") + if (ci >= 0) topLevel[line.substring(0, ci)] = line.substring(ci + 2) + i++ + } + } + const chunkSize = decodeFileSize(topLevel["chunkSize"]) + const serverReplicas: FileServerReplica[] = [] + for (const g of replicaGroups) { + for (const c of g.chunks) serverReplicas.push(decodeServerReplica(g.server, c)) + } + return { + party: topLevel["party"] as FileParty, + size: decodeFileSize(topLevel["size"]), + digest: base64urlDecode(topLevel["digest"]), + key: base64urlDecode(topLevel["key"]), + nonce: base64urlDecode(topLevel["nonce"]), + chunkSize, + chunks: foldReplicasToChunks(chunkSize, serverReplicas), + redirect + } +} + +function parseReplicaItem( + lines: string[], startIdx: number, group: {server: string, chunks: string[]} +): number { + let i = startIdx + const first = lines[i].substring(2) // strip "- " prefix + i = parseReplicaField(first, lines, i + 1, group) + while (i < lines.length && lines[i].startsWith(" ") && !lines[i].startsWith("- ")) { + i = parseReplicaField(lines[i].substring(2), lines, i + 1, group) + } + return i +} + +function parseReplicaField( + entry: string, lines: string[], nextIdx: number, + group: {server: string, chunks: string[]} +): number { + if (entry === "chunks:" || entry.startsWith("chunks:")) { + let i = nextIdx + while (i < lines.length && lines[i].startsWith(" - ")) { + group.chunks.push(lines[i].substring(4)) + i++ + } + return i + } + const ci = entry.indexOf(": ") + if (ci >= 0) { + const k = entry.substring(0, ci), v = entry.substring(ci + 2) + if (k === "server") group.server = v + } + return nextIdx +} + +// ── Validation ────────────────────────────────────────────────── + +export function validateFileDescription(fd: FileDescription): string | null { + for (let i = 0; i < fd.chunks.length; i++) { + if (fd.chunks[i].chunkNo !== i + 1) return "chunk numbers are not sequential" + } + let total = 0 + for (const c of fd.chunks) total += c.chunkSize + if (total !== fd.size) return "chunks total size is different than file size" + return null +} + +export const fdSeparator = "################################\n" diff --git a/xftp-web/src/protocol/encoding.ts b/xftp-web/src/protocol/encoding.ts new file mode 100644 index 0000000000..2bbfc97fcb --- /dev/null +++ b/xftp-web/src/protocol/encoding.ts @@ -0,0 +1,230 @@ +// Binary encoding/decoding matching Haskell Simplex.Messaging.Encoding module. +// All multi-byte integers are big-endian (network byte order). 
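+//
+// Quick reference (concrete values):
+//   encodeWord16(258)                      // → [0x01, 0x02]
+//   encodeBytes(Uint8Array.of(97, 98, 99)) // → [3, 97, 98, 99] (1-byte length prefix)
+//   encodeInt64(1n)                        // → [0, 0, 0, 0, 0, 0, 0, 1]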
+ +// -- Decoder: sequential parser over a Uint8Array (equivalent to Attoparsec parser) + +export class Decoder { + readonly buf: Uint8Array + private readonly view: DataView + private pos: number + + constructor(buf: Uint8Array) { + this.buf = buf + this.view = new DataView(buf.buffer, buf.byteOffset, buf.byteLength) + this.pos = 0 + } + + take(n: number): Uint8Array { + if (this.pos + n > this.buf.length) throw new Error("Decoder: unexpected end of input") + const slice = this.buf.subarray(this.pos, this.pos + n) + this.pos += n + return slice + } + + takeAll(): Uint8Array { + const slice = this.buf.subarray(this.pos) + this.pos = this.buf.length + return slice + } + + anyByte(): number { + if (this.pos >= this.buf.length) throw new Error("Decoder: unexpected end of input") + return this.buf[this.pos++] + } + + remaining(): number { + return this.buf.length - this.pos + } + + offset(): number { + return this.pos + } +} + +// -- Utility + +export function concatBytes(...arrays: Uint8Array[]): Uint8Array { + let totalLen = 0 + for (const a of arrays) totalLen += a.length + const result = new Uint8Array(totalLen) + let offset = 0 + for (const a of arrays) { + result.set(a, offset) + offset += a.length + } + return result +} + +// -- Word16: 2-byte big-endian (Encoding.hs:70) + +export function encodeWord16(n: number): Uint8Array { + const buf = new Uint8Array(2) + const view = new DataView(buf.buffer) + view.setUint16(0, n, false) + return buf +} + +export function decodeWord16(d: Decoder): number { + const bytes = d.take(2) + const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength) + return view.getUint16(0, false) +} + +// -- Word32: 4-byte big-endian (Encoding.hs:76) + +export function encodeWord32(n: number): Uint8Array { + const buf = new Uint8Array(4) + const view = new DataView(buf.buffer) + view.setUint32(0, n, false) + return buf +} + +export function decodeWord32(d: Decoder): number { + const bytes = d.take(4) + const view = new DataView(bytes.buffer, bytes.byteOffset, bytes.byteLength) + return view.getUint32(0, false) +} + +// -- Int64: two Word32s, high then low (Encoding.hs:82) +// Uses BigInt because JS numbers lose precision beyond 2^53. + +export function encodeInt64(n: bigint): Uint8Array { + const high = Number((n >> 32n) & 0xFFFFFFFFn) + const low = Number(n & 0xFFFFFFFFn) + return concatBytes(encodeWord32(high), encodeWord32(low)) +} + +export function decodeInt64(d: Decoder): bigint { + const high = BigInt(decodeWord32(d)) + const low = BigInt(decodeWord32(d)) + const unsigned = (high << 32n) | low + // Convert to signed Int64: if bit 63 is set, value is negative + return unsigned >= 0x8000000000000000n ? unsigned - 0x10000000000000000n : unsigned +} + +// -- ByteString: 1-byte length prefix + bytes (Encoding.hs:100) +// Max 255 bytes. + +export function encodeBytes(bs: Uint8Array): Uint8Array { + if (bs.length > 255) throw new Error("encodeBytes: length exceeds 255") + const result = new Uint8Array(1 + bs.length) + result[0] = bs.length + result.set(bs, 1) + return result +} + +export function decodeBytes(d: Decoder): Uint8Array { + const len = d.anyByte() + return d.take(len) +} + +// -- Large: 2-byte big-endian length prefix + bytes (Encoding.hs:133) +// Max 65535 bytes. 
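+// e.g. encodeLarge(Uint8Array.of(1, 2, 3)) → [0x00, 0x03, 1, 2, 3]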
+
+export function encodeLarge(bs: Uint8Array): Uint8Array {
+  if (bs.length > 65535) throw new Error("encodeLarge: length exceeds 65535")
+  return concatBytes(encodeWord16(bs.length), bs)
+}
+
+export function decodeLarge(d: Decoder): Uint8Array {
+  const len = decodeWord16(d)
+  return d.take(len)
+}
+
+// -- Tail: raw bytes, no prefix (Encoding.hs:124)
+
+export function encodeTail(bs: Uint8Array): Uint8Array {
+  return bs
+}
+
+export function decodeTail(d: Decoder): Uint8Array {
+  return d.takeAll()
+}
+
+// -- Bool: 'T' (0x54) or 'F' (0x46) (Encoding.hs:58)
+
+const CHAR_T = 0x54
+const CHAR_F = 0x46
+
+export function encodeBool(b: boolean): Uint8Array {
+  return new Uint8Array([b ? CHAR_T : CHAR_F])
+}
+
+export function decodeBool(d: Decoder): boolean {
+  const byte = d.anyByte()
+  if (byte === CHAR_T) return true
+  if (byte === CHAR_F) return false
+  throw new Error("decodeBool: invalid tag " + byte)
+}
+
+// -- String: encode as ByteString via Latin-1 (Encoding.hs:159)
+// Haskell's B.pack converts String (list of Char) to ByteString using Latin-1.
+
+export function encodeString(s: string): Uint8Array {
+  const bytes = new Uint8Array(s.length)
+  for (let i = 0; i < s.length; i++) {
+    bytes[i] = s.charCodeAt(i) & 0xFF
+  }
+  return encodeBytes(bytes)
+}
+
+export function decodeString(d: Decoder): string {
+  const bs = decodeBytes(d)
+  let s = ""
+  for (let i = 0; i < bs.length; i++) {
+    s += String.fromCharCode(bs[i])
+  }
+  return s
+}
+
+// -- Maybe: '0' for Nothing, '1' + encoded value for Just (Encoding.hs:114)
+
+const CHAR_0 = 0x30
+const CHAR_1 = 0x31
+
+export function encodeMaybe<T>(encode: (v: T) => Uint8Array, v: T | null): Uint8Array {
+  if (v === null) return new Uint8Array([CHAR_0])
+  return concatBytes(new Uint8Array([CHAR_1]), encode(v))
+}
+
+export function decodeMaybe<T>(decode: (d: Decoder) => T, d: Decoder): T | null {
+  const tag = d.anyByte()
+  if (tag === CHAR_0) return null
+  if (tag === CHAR_1) return decode(d)
+  throw new Error("decodeMaybe: invalid tag " + tag)
+}
+
+// -- NonEmpty: 1-byte length + encoded elements (Encoding.hs:165)
+// Fails on empty list (matches Haskell behavior).
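+// e.g. encodeNonEmpty(encodeBytes, [k1, k2]) → [2] ++ encodeBytes(k1) ++ encodeBytes(k2)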
+
+export function encodeNonEmpty<T>(encode: (v: T) => Uint8Array, xs: T[]): Uint8Array {
+  if (xs.length === 0) throw new Error("encodeNonEmpty: empty list")
+  if (xs.length > 255) throw new Error("encodeNonEmpty: length exceeds 255")
+  const parts: Uint8Array[] = [new Uint8Array([xs.length])]
+  for (const x of xs) parts.push(encode(x))
+  return concatBytes(...parts)
+}
+
+export function decodeNonEmpty<T>(decode: (d: Decoder) => T, d: Decoder): T[] {
+  const len = d.anyByte()
+  if (len === 0) throw new Error("decodeNonEmpty: empty list")
+  const result: T[] = []
+  for (let i = 0; i < len; i++) result.push(decode(d))
+  return result
+}
+
+// -- List encoding (smpEncodeList / smpListP, Encoding.hs:153)
+
+export function encodeList<T>(encode: (v: T) => Uint8Array, xs: T[]): Uint8Array {
+  if (xs.length > 255) throw new Error("encodeList: length exceeds 255")
+  const parts: Uint8Array[] = [new Uint8Array([xs.length])]
+  for (const x of xs) parts.push(encode(x))
+  return concatBytes(...parts)
+}
+
+export function decodeList<T>(decode: (d: Decoder) => T, d: Decoder): T[] {
+  const len = d.anyByte()
+  const result: T[] = []
+  for (let i = 0; i < len; i++) result.push(decode(d))
+  return result
+}
diff --git a/xftp-web/src/protocol/handshake.ts b/xftp-web/src/protocol/handshake.ts
new file mode 100644
index 0000000000..cfd8ccd5bb
--- /dev/null
+++ b/xftp-web/src/protocol/handshake.ts
@@ -0,0 +1,211 @@
+// XFTP handshake encoding/decoding — Simplex.FileTransfer.Transport
+//
+// Handles XFTP client/server handshake messages and version negotiation.
+
+import {
+  Decoder, concatBytes,
+  encodeWord16, decodeWord16,
+  encodeBytes, decodeBytes,
+  encodeMaybe,
+  decodeLarge, decodeNonEmpty
+} from "./encoding.js"
+import {sha256} from "../crypto/digest.js"
+import {decodePubKeyX25519} from "../crypto/keys.js"
+import {blockPad, blockUnpad, XFTP_BLOCK_SIZE} from "./transmission.js"
+
+// ── Version types ───────────────────────────────────────────────
+
+export interface VersionRange {
+  minVersion: number // Word16
+  maxVersion: number // Word16
+}
+
+// Encode version range as two big-endian Word16s.
+// Matches Haskell: smpEncode (VRange v1 v2) = smpEncode (v1, v2)
+export function encodeVersionRange(vr: VersionRange): Uint8Array {
+  return concatBytes(encodeWord16(vr.minVersion), encodeWord16(vr.maxVersion))
+}
+
+export function decodeVersionRange(d: Decoder): VersionRange {
+  const minVersion = decodeWord16(d)
+  const maxVersion = decodeWord16(d)
+  if (minVersion > maxVersion) throw new Error("invalid version range: min > max")
+  return {minVersion, maxVersion}
+}
+
+// Version negotiation: intersection of two version ranges, or null if incompatible.
+// Matches Haskell compatibleVRange.
+export function compatibleVRange(a: VersionRange, b: VersionRange): VersionRange | null {
+  const min = Math.max(a.minVersion, b.minVersion)
+  const max = Math.min(a.maxVersion, b.maxVersion)
+  if (min > max) return null
+  return {minVersion: min, maxVersion: max}
+}
+
+// ── Client hello ────────────────────────────────────────────────
+
+export interface XFTPClientHello {
+  webChallenge: Uint8Array | null // 32 random bytes for web handshake, or null for standard
}
+
+// Encode client hello (NOT padded — sent as raw POST body).
+// Wire format: smpEncode (Maybe ByteString) +export function encodeClientHello(hello: XFTPClientHello): Uint8Array { + return encodeMaybe(encodeBytes, hello.webChallenge) +} + +// ── Client handshake ─────────────────────────────────────────────── + +export interface XFTPClientHandshake { + xftpVersion: number // Word16 — negotiated version + keyHash: Uint8Array // SHA-256 CA certificate fingerprint (32 bytes) +} + +// Encode and pad client handshake to XFTP_BLOCK_SIZE. +// Wire format: pad(smpEncode (xftpVersion, keyHash), 16384) +export function encodeClientHandshake(ch: XFTPClientHandshake): Uint8Array { + const body = concatBytes(encodeWord16(ch.xftpVersion), encodeBytes(ch.keyHash)) + return blockPad(body, XFTP_BLOCK_SIZE) +} + +// ── Server handshake ─────────────────────────────────────────────── + +export interface XFTPServerHandshake { + xftpVersionRange: VersionRange + sessionId: Uint8Array + certChainDer: Uint8Array[] // raw DER certificate blobs (NonEmpty) + signedKeyDer: Uint8Array // raw DER SignedExact blob + webIdentityProof: Uint8Array | null // signature bytes, or null if absent/empty +} + +// Decode padded server handshake block. +// Wire format: unpad(block) → (versionRange, sessionId, certChainPubKey, sigBytes) +// where certChainPubKey = (NonEmpty Large certChain, Large signedKey) +// sigBytes = ByteString (1-byte len prefix, empty for Nothing) +// Trailing bytes (Tail) are ignored for forward compatibility. +export function decodeServerHandshake(block: Uint8Array): XFTPServerHandshake { + const raw = blockUnpad(block) + const d = new Decoder(raw) + const xftpVersionRange = decodeVersionRange(d) + const sessionId = decodeBytes(d) + // CertChainPubKey: smpEncode (encodeCertChain certChain, SignedObject signedPubKey) + const certChainDer = decodeNonEmpty(decodeLarge, d) + const signedKeyDer = decodeLarge(d) + // webIdentityProof: 1-byte length-prefixed ByteString (empty = Nothing) + let webIdentityProof: Uint8Array | null = null + if (d.remaining() > 0) { + const sigBytes = decodeBytes(d) + webIdentityProof = sigBytes.length === 0 ? null : sigBytes + } + // Remaining bytes are Tail (ignored for forward compatibility) + return {xftpVersionRange, sessionId, certChainDer, signedKeyDer, webIdentityProof} +} + +// ── Certificate utilities ────────────────────────────────────────── + +// Certificate chain decomposition matching Haskell chainIdCaCerts (Transport.Shared). +export type ChainCertificates = + | {type: 'empty'} + | {type: 'self'; cert: Uint8Array} + | {type: 'valid'; leafCert: Uint8Array; idCert: Uint8Array; caCert: Uint8Array} + | {type: 'long'} + +export function chainIdCaCerts(certChainDer: Uint8Array[]): ChainCertificates { + switch (certChainDer.length) { + case 0: return {type: 'empty'} + case 1: return {type: 'self', cert: certChainDer[0]} + case 2: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[1]} + case 3: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[2]} + case 4: return {type: 'valid', leafCert: certChainDer[0], idCert: certChainDer[1], caCert: certChainDer[3]} + default: return {type: 'long'} + } +} + +// SHA-256 fingerprint of the identity certificate. +// For 2-cert chains: idCert = last cert (same as CA). +// For 3+ cert chains: idCert = second cert (distinct from CA). 
+// Matches Haskell: getFingerprint idCert HashSHA256 +export function caFingerprint(certChainDer: Uint8Array[]): Uint8Array { + const cc = chainIdCaCerts(certChainDer) + if (cc.type !== 'valid') throw new Error("caFingerprint: need valid chain (2-4 certs)") + return sha256(cc.idCert) +} + +// ── SignedExact DER parsing ──────────────────────────────────────── + +// Parsed components of an X.509 SignedExact structure. +export interface SignedKey { + objectDer: Uint8Array // raw DER of the signed object (SubjectPublicKeyInfo) + dhKey: Uint8Array // extracted 32-byte X25519 public key + algorithm: Uint8Array // AlgorithmIdentifier DER bytes + signature: Uint8Array // raw signature bytes (Ed25519: 64, Ed448: 114) +} + +// Parse ASN.1 DER length (short and long form). +function derLength(d: Decoder): number { + const first = d.anyByte() + if (first < 0x80) return first + const numBytes = first & 0x7f + if (numBytes === 0 || numBytes > 4) throw new Error("DER: unsupported length encoding") + let len = 0 + for (let i = 0; i < numBytes; i++) { + len = (len << 8) | d.anyByte() + } + return len +} + +// Read a complete TLV element, returning the full DER bytes (tag + length + value). +function derElement(d: Decoder): Uint8Array { + const start = d.offset() + d.anyByte() // tag + const len = derLength(d) + d.take(len) // value + return d.buf.subarray(start, d.offset()) +} + +// Extract components from a SignedExact X.PubKey DER structure. +// ASN.1 layout: +// SEQUENCE { +// SubjectPublicKeyInfo (SEQUENCE) — the signed object +// AlgorithmIdentifier (SEQUENCE) — signature algorithm +// BIT STRING — signature +// } +export function extractSignedKey(signedDer: Uint8Array): SignedKey { + const outer = new Decoder(signedDer) + const outerTag = outer.anyByte() + if (outerTag !== 0x30) throw new Error("SignedExact: expected SEQUENCE tag 0x30, got 0x" + outerTag.toString(16)) + derLength(outer) // consume total content length + + // First element: SubjectPublicKeyInfo + const objectDer = derElement(outer) + + // Second element: AlgorithmIdentifier + const algorithm = derElement(outer) + + // Third element: BIT STRING (signature) + const sigTag = outer.anyByte() + if (sigTag !== 0x03) throw new Error("SignedExact: expected BIT STRING tag 0x03, got 0x" + sigTag.toString(16)) + const sigLen = derLength(outer) + const unusedBits = outer.anyByte() + if (unusedBits !== 0) throw new Error("SignedExact: expected 0 unused bits in signature") + const signature = outer.take(sigLen - 1) + + // Extract X25519 key from the signed object. + // objectDer may be the raw SPKI (44 bytes) or a wrapper SEQUENCE + // from x509 objectToSignedExact which wraps toASN1 in Start Sequence. + const dhKey = decodeX25519Key(objectDer) + + return {objectDer, dhKey, algorithm, signature} +} + +// Extract X25519 raw public key from either direct SPKI (44 bytes) +// or a wrapper SEQUENCE containing the SPKI. 
+function decodeX25519Key(der: Uint8Array): Uint8Array { + if (der.length === 44) return decodePubKeyX25519(der) + if (der[0] !== 0x30) throw new Error("decodeX25519Key: expected SEQUENCE") + const d = new Decoder(der) + d.anyByte() + derLength(d) + const inner = derElement(d) + return decodePubKeyX25519(inner) +} diff --git a/xftp-web/src/protocol/transmission.ts b/xftp-web/src/protocol/transmission.ts new file mode 100644 index 0000000000..75d00890b2 --- /dev/null +++ b/xftp-web/src/protocol/transmission.ts @@ -0,0 +1,112 @@ +// XFTP transmission framing — Simplex.Messaging.Transport + FileTransfer.Protocol +// +// Handles block-level pad/unpad, batch encoding, and Ed25519 auth signing. + +import { + Decoder, concatBytes, + encodeBytes, decodeBytes, + encodeLarge, decodeLarge +} from "./encoding.js" +import {sign} from "../crypto/keys.js" + +// ── Constants ───────────────────────────────────────────────────── + +export const XFTP_BLOCK_SIZE = 16384 + +// Protocol versions (FileTransfer.Transport) +export const initialXFTPVersion = 1 +export const authCmdsXFTPVersion = 2 +export const blockedFilesXFTPVersion = 3 +export const currentXFTPVersion = 3 + +// ── Block-level pad/unpad (Crypto.hs:pad/unPad, strict ByteString) ── + +export function blockPad(msg: Uint8Array, blockSize: number = XFTP_BLOCK_SIZE): Uint8Array { + const len = msg.length + const padLen = blockSize - len - 2 + if (padLen < 0) throw new Error("blockPad: message too large for block") + const result = new Uint8Array(blockSize) + result[0] = (len >>> 8) & 0xff + result[1] = len & 0xff + result.set(msg, 2) + result.fill(0x23, 2 + len) // '#' padding + return result +} + +export function blockUnpad(block: Uint8Array): Uint8Array { + if (block.length < 2) throw new Error("blockUnpad: too short") + const len = (block[0] << 8) | block[1] + if (2 + len > block.length) throw new Error("blockUnpad: invalid length") + return block.subarray(2, 2 + len) +} + +// ── Transmission encoding (client -> server) ────────────────────── + +// Encode an authenticated XFTP command as a padded block. +// Matches xftpEncodeAuthTransmission with implySessId = False: +// sessionId is included in both signed data AND wire data. +export function encodeAuthTransmission( + sessionId: Uint8Array, + corrId: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array, + privateKey: Uint8Array +): Uint8Array { + // t' = encodeTransmission_ v t = smpEncode (corrId, entityId) <> cmdBytes + const tInner = concatBytes(encodeBytes(corrId), encodeBytes(entityId), cmdBytes) + // tForAuth = smpEncode sessionId <> t' + const tForAuth = concatBytes(encodeBytes(sessionId), tInner) + const signature = sign(privateKey, tForAuth) + const authenticator = encodeBytes(signature) + // implySessId = False: tToSend = tForAuth (sessionId on wire) + const encoded = concatBytes(authenticator, tForAuth) + const batch = concatBytes(new Uint8Array([1]), encodeLarge(encoded)) + return blockPad(batch) +} + +// Encode an unsigned XFTP command (e.g. PING) as a padded block. +// Matches xftpEncodeTransmission with implySessId = False: sessionId on wire. 
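+// Sketch (illustrative): a PING block would be built as
+//   encodeTransmission(sessId, corrId, entityId, encodePING())
+// where entityId is typically an empty Uint8Array for PING.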
+export function encodeTransmission( + sessionId: Uint8Array, + corrId: Uint8Array, + entityId: Uint8Array, + cmdBytes: Uint8Array +): Uint8Array { + const tInner = concatBytes(encodeBytes(sessionId), encodeBytes(corrId), encodeBytes(entityId), cmdBytes) + // No auth: tEncodeAuth False Nothing = smpEncode B.empty = \x00 + const authenticator = encodeBytes(new Uint8Array(0)) + const encoded = concatBytes(authenticator, tInner) + const batch = concatBytes(new Uint8Array([1]), encodeLarge(encoded)) + return blockPad(batch) +} + +// ── Transmission decoding (server -> client) ────────────────────── + +export interface DecodedTransmission { + corrId: Uint8Array + entityId: Uint8Array + command: Uint8Array +} + +// Decode a server response block into raw parts. +// Call decodeResponse(command) from commands.ts to parse the response. +// Matches xftpDecodeTClient with implySessId = False: reads and verifies sessionId from wire. +export function decodeTransmission(sessionId: Uint8Array, block: Uint8Array): DecodedTransmission { + const raw = blockUnpad(block) + const d = new Decoder(raw) + const count = d.anyByte() + if (count !== 1) throw new Error("decodeTransmission: expected batch count 1, got " + count) + const transmission = decodeLarge(d) + const td = new Decoder(transmission) + // Skip authenticator (server responses have empty auth) + decodeBytes(td) + // implySessId = False: read sessionId from wire and verify + const sessId = decodeBytes(td) + if (sessId.length !== sessionId.length || !sessId.every((b, i) => b === sessionId[i])) { + throw new Error("decodeTransmission: session ID mismatch") + } + const corrId = decodeBytes(td) + const entityId = decodeBytes(td) + const command = td.takeAll() + return {corrId, entityId, command} +} diff --git a/xftp-web/test/__screenshots__/browser.test.ts/browser-upload---download-round-trip-1.png b/xftp-web/test/__screenshots__/browser.test.ts/browser-upload---download-round-trip-1.png new file mode 100644 index 0000000000..850d5b364e Binary files /dev/null and b/xftp-web/test/__screenshots__/browser.test.ts/browser-upload---download-round-trip-1.png differ diff --git a/xftp-web/test/browser.test.ts b/xftp-web/test/browser.test.ts new file mode 100644 index 0000000000..26a9670ca3 --- /dev/null +++ b/xftp-web/test/browser.test.ts @@ -0,0 +1,19 @@ +import {test, expect} from 'vitest' +import {encryptFileForUpload, uploadFile, downloadFile, newXFTPAgent, closeXFTPAgent} from '../src/agent.js' +import {parseXFTPServer} from '../src/protocol/address.js' + +const server = parseXFTPServer(import.meta.env.XFTP_SERVER) + +test('browser upload + download round-trip', async () => { + const agent = newXFTPAgent() + try { + const data = new Uint8Array(50000) + crypto.getRandomValues(data) + const encrypted = encryptFileForUpload(data, 'test.bin') + const {rcvDescription} = await uploadFile(agent, server, encrypted) + const {content} = await downloadFile(agent, rcvDescription) + expect(content).toEqual(data) + } finally { + closeXFTPAgent(agent) + } +}) diff --git a/xftp-web/test/globalSetup.ts b/xftp-web/test/globalSetup.ts new file mode 100644 index 0000000000..ab42891545 --- /dev/null +++ b/xftp-web/test/globalSetup.ts @@ -0,0 +1,170 @@ +import {spawn, execSync, ChildProcess} from 'child_process' +import {createHash} from 'crypto' +import {createConnection, createServer} from 'net' +import {resolve, join} from 'path' +import {readFileSync, mkdtempSync, writeFileSync, copyFileSync, existsSync, unlinkSync} from 'fs' +import {tmpdir} from 'os' + +const LOCK_FILE = 
join(tmpdir(), 'xftp-test-server.pid')
+export const PORT_FILE = join(tmpdir(), 'xftp-test-server.port')
+
+// Find a free port by binding to port 0
+function findFreePort(): Promise<number> {
+  return new Promise((resolve, reject) => {
+    const srv = createServer()
+    srv.listen(0, '127.0.0.1', () => {
+      const addr = srv.address()
+      if (addr && typeof addr === 'object') {
+        const port = addr.port
+        srv.close(() => resolve(port))
+      } else {
+        srv.close(() => reject(new Error('Could not get port')))
+      }
+    })
+    srv.on('error', reject)
+  })
+}
+
+let server: ChildProcess | null = null
+let isOwner = false
+
+export async function setup() {
+  // Check if another test process owns the server
+  if (existsSync(LOCK_FILE) && existsSync(PORT_FILE)) {
+    const pid = parseInt(readFileSync(LOCK_FILE, 'utf-8').trim(), 10)
+    const port = parseInt(readFileSync(PORT_FILE, 'utf-8').trim(), 10)
+    try {
+      process.kill(pid, 0) // check if process exists
+      // Lock owner is alive — wait for server to be ready
+      await waitForPort(port)
+      return
+    } catch (_) {
+      // Lock owner is dead — clean up
+      try { unlinkSync(LOCK_FILE) } catch (_) {}
+      try { unlinkSync(PORT_FILE) } catch (_) {}
+    }
+  }
+
+  // Find a free port dynamically
+  const xftpPort = await findFreePort()
+
+  writeFileSync(LOCK_FILE, String(process.pid))
+  writeFileSync(PORT_FILE, String(xftpPort))
+  isOwner = true
+
+  const fixtures = resolve(__dirname, '../../tests/fixtures')
+
+  // Create temp directories
+  const cfgDir = mkdtempSync(join(tmpdir(), 'xftp-cfg-'))
+  const logDir = mkdtempSync(join(tmpdir(), 'xftp-log-'))
+  const filesDir = mkdtempSync(join(tmpdir(), 'xftp-files-'))
+
+  // Copy certificates to cfgDir (xftp-server expects ca.crt, server.key, server.crt there)
+  copyFileSync(join(fixtures, 'ca.crt'), join(cfgDir, 'ca.crt'))
+  copyFileSync(join(fixtures, 'server.key'), join(cfgDir, 'server.key'))
+  copyFileSync(join(fixtures, 'server.crt'), join(cfgDir, 'server.crt'))
+
+  // Write fingerprint file (checkSavedFingerprint reads this on startup)
+  // Fingerprint = SHA-256 of DER-encoded certificate (not PEM)
+  const pem = readFileSync(join(fixtures, 'ca.crt'), 'utf-8')
+  const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64')
+  const fp = createHash('sha256').update(der).digest('base64').replace(/\+/g, '-').replace(/\//g, '_')
+  writeFileSync(join(cfgDir, 'fingerprint'), fp + '\n')
+
+  // Write INI config file
+  const iniContent = `[STORE_LOG]
+enable: off
+
+[TRANSPORT]
+host: localhost
+port: ${xftpPort}
+
+[FILES]
+path: ${filesDir}
+
+[WEB]
+cert: ${join(fixtures, 'web.crt')}
+key: ${join(fixtures, 'web.key')}
+`
+  writeFileSync(join(cfgDir, 'file-server.ini'), iniContent)
+
+  // Resolve binary path once (avoids cabal rebuild check on every run)
+  const serverBin = execSync('cabal -v0 list-bin xftp-server', {encoding: 'utf-8'}).trim()
+
+  // Spawn xftp-server directly
+  server = spawn(serverBin, ['start'], {
+    env: {
+      ...process.env,
+      XFTP_SERVER_CFG_PATH: cfgDir,
+      XFTP_SERVER_LOG_PATH: logDir
+    },
+    stdio: ['ignore', 'pipe', 'pipe']
+  })
+
+  server.stderr?.on('data', (data: Buffer) => {
+    console.error('[xftp-server]', data.toString())
+  })
+
+  // Poll-connect until the server is actually listening
+  await waitForServerReady(server, xftpPort)
+}
+
+export async function teardown() {
+  if (isOwner) {
+    try { unlinkSync(LOCK_FILE) } catch (_) {}
+    try { unlinkSync(PORT_FILE) } catch (_) {}
+    if (server) {
+      server.kill('SIGTERM')
+      await new Promise<void>(resolve => {
+        server!.on('exit', () => resolve())
+        
setTimeout(resolve, 3000) + }) + } + } +} + +function waitForServerReady(proc: ChildProcess, port: number): Promise { + return new Promise((resolve, reject) => { + let settled = false + const timeout = setTimeout(() => { + settled = true + reject(new Error('Server start timeout')) + }, 15000) + const settle = (fn: () => void) => { if (!settled) { settled = true; clearTimeout(timeout); fn() } } + proc.on('error', (e) => settle(() => reject(e))) + proc.on('exit', (code) => { + if (code !== 0) settle(() => reject(new Error(`Server exited with code ${code}`))) + }) + // printXFTPConfig prints "Listening on port" BEFORE bind, so poll-connect + const poll = () => { + if (settled) return + const sock = createConnection({port, host: 'localhost'}, () => { + sock.destroy() + settle(() => resolve()) + }) + sock.on('error', () => { + sock.destroy() + setTimeout(poll, 100) + }) + } + setTimeout(poll, 200) + }) +} + +function waitForPort(port: number): Promise { + return new Promise((resolve, reject) => { + const deadline = Date.now() + 15000 + const poll = () => { + if (Date.now() > deadline) return reject(new Error('Timed out waiting for server')) + const sock = createConnection({port, host: 'localhost'}, () => { + sock.destroy() + resolve() + }) + sock.on('error', () => { + sock.destroy() + setTimeout(poll, 100) + }) + } + poll() + }) +} diff --git a/xftp-web/test/page.spec.ts b/xftp-web/test/page.spec.ts new file mode 100644 index 0000000000..bcdcd2490d --- /dev/null +++ b/xftp-web/test/page.spec.ts @@ -0,0 +1,42 @@ +import {test, expect} from '@playwright/test' + +const PAGE_URL = 'http://localhost:4173' + +test('page upload + download round-trip', async ({page}) => { + // Upload page + await page.goto(PAGE_URL) + await expect(page.locator('#drop-zone')).toBeVisible() + + // Create a small test file + const content = 'Hello SimpleX ' + Date.now() + const fileName = 'test-file.txt' + const buffer = Buffer.from(content, 'utf-8') + + // Set file via hidden input + const fileInput = page.locator('#file-input') + await fileInput.setInputFiles({name: fileName, mimeType: 'text/plain', buffer}) + + // Wait for upload to complete + const shareLink = page.locator('[data-testid="share-link"]') + await expect(shareLink).toBeVisible({timeout: 30_000}) + + // Extract the hash from the share link + const linkValue = await shareLink.inputValue() + const hash = new URL(linkValue).hash + + // Navigate to download page + await page.goto(PAGE_URL + hash) + await expect(page.locator('#dl-btn')).toBeVisible() + + // Start download and wait for completion + const downloadPromise = page.waitForEvent('download') + await page.locator('#dl-btn').click() + const download = await downloadPromise + + // Verify downloaded file + expect(download.suggestedFilename()).toBe(fileName) + const downloadedContent = (await download.path()) !== null + ? 
(await import('fs')).readFileSync(await download.path()!, 'utf-8') + : '' + expect(downloadedContent).toBe(content) +}) diff --git a/xftp-web/tsconfig.json b/xftp-web/tsconfig.json new file mode 100644 index 0000000000..e42e74895d --- /dev/null +++ b/xftp-web/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "moduleResolution": "node", + "lib": ["ES2022"], + "outDir": "dist", + "rootDir": "src", + "declaration": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "sourceMap": true + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist", "test"] +} diff --git a/xftp-web/tsconfig.web.json b/xftp-web/tsconfig.web.json new file mode 100644 index 0000000000..476d40b5e5 --- /dev/null +++ b/xftp-web/tsconfig.web.json @@ -0,0 +1,12 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "DOM"] + }, + "include": ["web/**/*.ts", "src/**/*.ts"], + "exclude": ["web/crypto.worker.ts"] +} diff --git a/xftp-web/tsconfig.worker.json b/xftp-web/tsconfig.worker.json new file mode 100644 index 0000000000..0335541dc9 --- /dev/null +++ b/xftp-web/tsconfig.worker.json @@ -0,0 +1,11 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "rootDir": ".", + "noEmit": true, + "types": [], + "moduleResolution": "bundler", + "lib": ["ES2022", "WebWorker"] + }, + "include": ["web/crypto.worker.ts", "src/**/*.ts"] +} diff --git a/xftp-web/vite.config.ts b/xftp-web/vite.config.ts new file mode 100644 index 0000000000..e5f361672a --- /dev/null +++ b/xftp-web/vite.config.ts @@ -0,0 +1,58 @@ +import {defineConfig, type Plugin} from 'vite' +import {readFileSync} from 'fs' +import {createHash} from 'crypto' +import {resolve} from 'path' +import presets from './web/servers.json' +import {PORT_FILE} from './test/globalSetup' + +const __dirname = import.meta.dirname + +function parseHost(addr: string): string { + const m = addr.match(/@(.+)$/) + if (!m) throw new Error('bad server address: ' + addr) + const host = m[1].split(',')[0] + return host.includes(':') ? 
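The vite config that follows turns XFTP server addresses into CSP `connect-src` origins. A minimal sketch of the mapping that `parseHost` (defined in vite.config.ts below) implements, using hypothetical addresses:

// assumed input format: xftp://<fingerprint>@host[:port][,onion-host]
parseHost('xftp://abc=@example.com')         // -> 'example.com:443' (port defaults to 443)
parseHost('xftp://abc=@example.com:5223')    // -> 'example.com:5223'
parseHost('xftp://abc=@example.com,x.onion') // -> 'example.com:443' (only the first host is used)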
diff --git a/xftp-web/vite.config.ts b/xftp-web/vite.config.ts
new file mode 100644
index 0000000000..e5f361672a
--- /dev/null
+++ b/xftp-web/vite.config.ts
@@ -0,0 +1,58 @@
+import {defineConfig, type Plugin} from 'vite'
+import {readFileSync} from 'fs'
+import {createHash} from 'crypto'
+import {resolve} from 'path'
+import presets from './web/servers.json'
+import {PORT_FILE} from './test/globalSetup'
+
+const __dirname = import.meta.dirname
+
+function parseHost(addr: string): string {
+  const m = addr.match(/@(.+)$/)
+  if (!m) throw new Error('bad server address: ' + addr)
+  const host = m[1].split(',')[0]
+  return host.includes(':') ? host : host + ':443'
+}
+
+function cspPlugin(servers: string[]): Plugin {
+  const origins = servers.map(s => 'https://' + parseHost(s)).join(' ')
+  return {
+    name: 'csp-connect-src',
+    transformIndexHtml: {
+      order: 'pre',
+      handler(html, ctx) {
+        if (ctx.server) {
+          return html.replace(/<meta[^>]*?Content-Security-Policy[\s\S]*?>/i, '')
+        }
+        return html.replace('__CSP_CONNECT_SRC__', origins)
+      }
+    }
+  }
+}
+
+export default defineConfig(({mode}) => {
+  const define: Record<string, string> = {}
+  let servers: string[]
+
+  if (mode === 'development') {
+    const pem = readFileSync('../tests/fixtures/ca.crt', 'utf-8')
+    const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64')
+    const fp = createHash('sha256').update(der).digest('base64')
+      .replace(/\+/g, '-').replace(/\//g, '_')
+    // PORT_FILE is written by globalSetup before vite build runs
+    const port = readFileSync(PORT_FILE, 'utf-8').trim()
+    servers = [`xftp://${fp}@localhost:${port}`]
+    define['__XFTP_SERVERS__'] = JSON.stringify(servers)
+  } else {
+    servers = [...presets.simplex, ...presets.flux]
+  }
+
+  return {
+    root: 'web',
+    build: {outDir: resolve(__dirname, 'dist-web'), target: 'esnext'},
+    preview: {host: true},
+    define,
+    worker: {format: 'es' as const},
+    plugins: [cspPlugin(servers)],
+  }
+})
diff --git a/xftp-web/vitest.config.ts b/xftp-web/vitest.config.ts
new file mode 100644
index 0000000000..6f7461981a
--- /dev/null
+++ b/xftp-web/vitest.config.ts
@@ -0,0 +1,41 @@
+import {defineConfig, type Plugin} from 'vitest/config'
+import {readFileSync} from 'fs'
+import {createHash} from 'crypto'
+import {PORT_FILE} from './test/globalSetup'
+
+// Compute fingerprint from ca.crt (SHA-256 of DER, same as Haskell's loadFileFingerprint)
+const pem = readFileSync('../tests/fixtures/ca.crt', 'utf-8')
+const der = Buffer.from(pem.replace(/-----[^-]+-----/g, '').replace(/\s/g, ''), 'base64')
+const fingerprint = createHash('sha256').update(der).digest('base64').replace(/\+/g, '-').replace(/\//g, '_')
+
+// Plugin to inject XFTP_SERVER at transform time (after globalSetup writes PORT_FILE)
+function xftpServerPlugin(): Plugin {
+  let serverAddr: string | null = null
+  return {
+    name: 'xftp-server-define',
+    transform(code, id) {
+      if (!code.includes('import.meta.env.XFTP_SERVER')) return null
+      if (!serverAddr) {
+        const port = readFileSync(PORT_FILE, 'utf-8').trim()
+        serverAddr = `xftp://${fingerprint}@localhost:${port}`
+      }
+      return code.replace(/import\.meta\.env\.XFTP_SERVER/g, JSON.stringify(serverAddr))
+    }
+  }
+}
+
+export default defineConfig({
+  esbuild: {target: 'esnext'},
+  optimizeDeps: {esbuildOptions: {target: 'esnext'}},
+  plugins: [xftpServerPlugin()],
+  test: {
+    include: ['test/**/*.test.ts'],
+    browser: {
+      enabled: true,
+      provider: 'playwright',
+      instances: [{browser: 'chromium'}],
+      headless: true
+    },
+    globalSetup: './test/globalSetup.ts'
+  }
+})
diff --git a/xftp-web/web/crypto-backend.ts b/xftp-web/web/crypto-backend.ts
new file mode 100644
index 0000000000..8d12cd7d8d
--- /dev/null
+++ b/xftp-web/web/crypto-backend.ts
@@ -0,0 +1,112 @@
+import type {FileHeader} from '../src/crypto/file.js'
+
+export interface CryptoBackend {
+  encrypt(data: Uint8Array, fileName: string,
+    onProgress?: (done: number, total: number) => void
+  ): Promise<EncryptResult>
+  readChunk(offset: number, size: number): Promise<Uint8Array>
+  decryptAndStoreChunk(
+    dhSecret: Uint8Array, nonce: Uint8Array,
+    body: Uint8Array, digest: Uint8Array, chunkNo: number
+  ): Promise<void>
+  verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array}
+  ): Promise<{header: FileHeader, content: Uint8Array}>
+  cleanup(): Promise<void>
+}
+
+export interface EncryptResult {
+  digest: Uint8Array
+  key: Uint8Array
+  nonce: Uint8Array
+  chunkSizes: number[]
+}
+
+type PendingRequest = {resolve: (value: any) => void, reject: (reason: any) => void}
+
+class WorkerBackend implements CryptoBackend {
+  private worker: Worker
+  private pending = new Map<number, PendingRequest>()
+  private nextId = 1
+  private progressCb: ((done: number, total: number) => void) | null = null
+
+  constructor() {
+    this.worker = new Worker(new URL('./crypto.worker.ts', import.meta.url), {type: 'module'})
+    this.worker.onmessage = (e) => this.handleMessage(e.data)
+  }
+
+  private handleMessage(msg: {id: number, type: string, [k: string]: any}) {
+    if (msg.type === 'progress') {
+      this.progressCb?.(msg.done, msg.total)
+      return
+    }
+    const p = this.pending.get(msg.id)
+    if (!p) return
+    this.pending.delete(msg.id)
+    if (msg.type === 'error') {
+      p.reject(new Error(msg.message))
+    } else {
+      p.resolve(msg)
+    }
+  }
+
+  private send(msg: Record<string, any>, transfer?: Transferable[]): Promise<any> {
+    const id = this.nextId++
+    return new Promise((resolve, reject) => {
+      this.pending.set(id, {resolve, reject})
+      this.worker.postMessage({...msg, id}, transfer ?? [])
+    })
+  }
+
+  private toTransferable(data: Uint8Array): ArrayBuffer {
+    if (data.byteOffset !== 0 || data.byteLength !== data.buffer.byteLength) {
+      return data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength) as ArrayBuffer
+    }
+    return data.buffer as ArrayBuffer
+  }
+
+  async encrypt(data: Uint8Array, fileName: string,
+    onProgress?: (done: number, total: number) => void): Promise<EncryptResult> {
+    this.progressCb = onProgress ?? null
+    const buf = this.toTransferable(data)
+    const resp = await this.send({type: 'encrypt', data: buf, fileName}, [buf])
+    this.progressCb = null
+    return {digest: resp.digest, key: resp.key, nonce: resp.nonce, chunkSizes: resp.chunkSizes}
+  }
+
+  async readChunk(offset: number, size: number): Promise<Uint8Array> {
+    const resp = await this.send({type: 'readChunk', offset, size})
+    return new Uint8Array(resp.data)
+  }
+
+  async decryptAndStoreChunk(
+    dhSecret: Uint8Array, nonce: Uint8Array,
+    body: Uint8Array, digest: Uint8Array, chunkNo: number
+  ): Promise<void> {
+    const buf = this.toTransferable(body)
+    await this.send(
+      {type: 'decryptAndStoreChunk', dhSecret, nonce, body: buf, chunkDigest: digest, chunkNo},
+      [buf]
+    )
+  }
+
+  async verifyAndDecrypt(params: {size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array}
+  ): Promise<{header: FileHeader, content: Uint8Array}> {
+    const resp = await this.send({
+      type: 'verifyAndDecrypt',
+      size: params.size, digest: params.digest, key: params.key, nonce: params.nonce
+    })
+    return {header: resp.header, content: new Uint8Array(resp.content)}
+  }
+
+  async cleanup(): Promise<void> {
+    await this.send({type: 'cleanup'})
+    this.worker.terminate()
+  }
+}
+
+export function createCryptoBackend(): CryptoBackend {
+  if (typeof Worker === 'undefined') {
+    throw new Error('Web Workers required — update your browser')
+  }
+  return new WorkerBackend()
+}
diff --git a/xftp-web/web/crypto.worker.ts b/xftp-web/web/crypto.worker.ts
new file mode 100644
index 0000000000..761322faa8
--- /dev/null
+++ b/xftp-web/web/crypto.worker.ts
@@ -0,0 +1,226 @@
+import sodium from 'libsodium-wrappers-sumo'
+import {encryptFile, encodeFileHeader, decryptChunks} from '../src/crypto/file.js'
+import {sha512} from '../src/crypto/digest.js'
+import {prepareChunkSizes, fileSizeLen, authTagSize} from '../src/protocol/chunks.js'
+import {concatBytes} from '../src/protocol/encoding.js'
+import {decryptReceivedChunk} from '../src/download.js'
+
+// ── OPFS session management ─────────────────────────────────────
+
+const SESSION_DIR = `session-${Date.now()}-${crypto.randomUUID()}`
+let uploadReadHandle: FileSystemSyncAccessHandle | null = null
+let downloadWriteHandle: FileSystemSyncAccessHandle | null = null
+const chunkMeta = new Map<number, {offset: number, size: number}>()
+let currentDownloadOffset = 0
+let sessionDir: FileSystemDirectoryHandle | null = null
+
+async function getSessionDir(): Promise<FileSystemDirectoryHandle> {
+  if (!sessionDir) {
+    const root = await navigator.storage.getDirectory()
+    sessionDir = await root.getDirectoryHandle(SESSION_DIR, {create: true})
+  }
+  return sessionDir
+}
+
+async function sweepStale() {
+  const root = await navigator.storage.getDirectory()
+  const oneHourAgo = Date.now() - 3600_000
+  for await (const [name] of (root as any).entries()) {
+    if (!name.startsWith('session-')) continue
+    const parts = name.split('-')
+    const ts = parseInt(parts[1], 10)
+    if (!isNaN(ts) && ts < oneHourAgo) {
+      try { await root.removeEntry(name, {recursive: true}) } catch (_) {}
+    }
+  }
+}
+
+// ── Message handlers ────────────────────────────────────────────
+
+async function handleEncrypt(id: number, data: ArrayBuffer, fileName: string) {
+  const source = new Uint8Array(data)
+  const key = new Uint8Array(32)
+  const nonce = new Uint8Array(24)
+  crypto.getRandomValues(key)
+  crypto.getRandomValues(nonce)
+  const fileHdr = encodeFileHeader({fileName, fileExtra: null})
+  const fileSize = BigInt(fileHdr.length + source.length)
+  const payloadSize = Number(fileSize) + fileSizeLen + authTagSize
+  const chunkSizes = prepareChunkSizes(payloadSize)
+  const encSize = BigInt(chunkSizes.reduce((a: number, b: number) => a + b, 0))
+  const encData = encryptFile(source, fileHdr, key, nonce, fileSize, encSize)
+
+  self.postMessage({id, type: 'progress', done: 50, total: 100})
+
+  const digest = sha512(encData)
+
+  self.postMessage({id, type: 'progress', done: 80, total: 100})
+
+  // Write to OPFS
+  const dir = await getSessionDir()
+  const fileHandle = await dir.getFileHandle('upload.bin', {create: true})
+  const writeHandle = await fileHandle.createSyncAccessHandle()
+  writeHandle.write(encData)
+  writeHandle.flush()
+  writeHandle.close()
+
+  // Reopen as persistent read handle
+  uploadReadHandle = await fileHandle.createSyncAccessHandle()
+
+  self.postMessage({id, type: 'progress', done: 100, total: 100})
+  self.postMessage({id, type: 'encrypted', digest, key, nonce, chunkSizes})
+}
+
+function handleReadChunk(id: number, offset: number, size: number) {
+  if (!uploadReadHandle) {
+    self.postMessage({id, type: 'error', message: 'No upload file open'})
+    return
+  }
+  const buf = new Uint8Array(size)
+  uploadReadHandle.read(buf, {at: offset})
+  const ab = buf.buffer as ArrayBuffer
+  self.postMessage({id, type: 'chunk', data: ab}, [ab])
+}
+
+async function handleDecryptAndStore(
+  id: number, dhSecret: Uint8Array, nonce: Uint8Array,
+  body: ArrayBuffer, chunkDigest: Uint8Array, chunkNo: number
+) {
+  const bodyArr = new Uint8Array(body)
+  const decrypted = decryptReceivedChunk(dhSecret, nonce, bodyArr, chunkDigest)
+
+  if (!downloadWriteHandle) {
+    const dir = await getSessionDir()
+    const fileHandle = await dir.getFileHandle('download.bin', {create: true})
+    downloadWriteHandle = await fileHandle.createSyncAccessHandle()
+  }
+
+  const offset = currentDownloadOffset
+  currentDownloadOffset += decrypted.length
+  chunkMeta.set(chunkNo, {offset, size: decrypted.length})
+  downloadWriteHandle.write(decrypted, {at: offset})
+  downloadWriteHandle.flush()
+
+  self.postMessage({id, type: 'stored'})
+}
+
+async function handleVerifyAndDecrypt(
+  id: number, size: number, digest: Uint8Array, key: Uint8Array, nonce: Uint8Array
+) {
+  // Close write handle, reopen as read
+  if (downloadWriteHandle) {
+    downloadWriteHandle.flush()
+    downloadWriteHandle.close()
+    downloadWriteHandle = null
+  }
+
+  const dir = await getSessionDir()
+  const fileHandle = await dir.getFileHandle('download.bin')
+  const readHandle = await fileHandle.createSyncAccessHandle()
+
+  // Read chunks ordered by chunkNo
+  const sortedEntries = [...chunkMeta.entries()].sort((a, b) => a[0] - b[0])
+  const chunks: Uint8Array[] = []
+  for (const [, meta] of sortedEntries) {
+    const buf = new Uint8Array(meta.size)
+    readHandle.read(buf, {at: meta.offset})
+    chunks.push(buf)
+  }
+  readHandle.close()
+
+  // Verify size
+  const combined = chunks.length === 1 ? chunks[0] : concatBytes(...chunks)
+  if (combined.length !== size) {
+    self.postMessage({id, type: 'error', message: `File size mismatch: ${combined.length} !== ${size}`})
+    return
+  }
+
+  // Verify SHA-512 digest
+  const actualDigest = sha512(combined)
+  if (!digestEqual(actualDigest, digest)) {
+    self.postMessage({id, type: 'error', message: 'File digest mismatch'})
+    return
+  }
+
+  // File-level decrypt
+  const result = decryptChunks(BigInt(size), chunks, key, nonce)
+
+  // Clean up download file
+  try { await dir.removeEntry('download.bin') } catch (_) {}
+  chunkMeta.clear()
+  currentDownloadOffset = 0
+
+  const contentBuf = result.content.buffer.slice(
+    result.content.byteOffset,
+    result.content.byteOffset + result.content.byteLength
+  )
+  self.postMessage(
+    {id, type: 'decrypted', header: result.header, content: contentBuf},
+    [contentBuf]
+  )
+}
+
+async function handleCleanup(id: number) {
+  if (uploadReadHandle) {
+    uploadReadHandle.close()
+    uploadReadHandle = null
+  }
+  if (downloadWriteHandle) {
+    downloadWriteHandle.close()
+    downloadWriteHandle = null
+  }
+  chunkMeta.clear()
+  currentDownloadOffset = 0
+  try {
+    const root = await navigator.storage.getDirectory()
+    await root.removeEntry(SESSION_DIR, {recursive: true})
+  } catch (_) {}
+  sessionDir = null
+  self.postMessage({id, type: 'cleaned'})
+}
+
+// ── Message dispatch ────────────────────────────────────────────
+
+self.onmessage = async (e: MessageEvent) => {
+  await initPromise
+  const msg = e.data
+  try {
+    switch (msg.type) {
+      case 'encrypt':
+        await handleEncrypt(msg.id, msg.data, msg.fileName)
+        break
+      case 'readChunk':
+        handleReadChunk(msg.id, msg.offset, msg.size)
+        break
+      case 'decryptAndStoreChunk':
+        await handleDecryptAndStore(msg.id, msg.dhSecret, msg.nonce, msg.body, msg.chunkDigest, msg.chunkNo)
+        break
+      case 'verifyAndDecrypt':
+        await handleVerifyAndDecrypt(msg.id, msg.size, msg.digest, msg.key, msg.nonce)
+        break
+      case 'cleanup':
+        await handleCleanup(msg.id)
+        break
+      default:
+        self.postMessage({id: msg.id, type: 'error', message: `Unknown message type: ${msg.type}`})
+    }
+  } catch (err: any) {
+    self.postMessage({id: msg.id, type: 'error', message: err?.message ?? String(err)})
+  }
+}
+
+// ── Helpers ─────────────────────────────────────────────────────
+
+function digestEqual(a: Uint8Array, b: Uint8Array): boolean {
+  if (a.length !== b.length) return false
+  let diff = 0
+  for (let i = 0; i < a.length; i++) diff |= a[i] ^ b[i]
+  return diff === 0
+}
+
+// ── Init ────────────────────────────────────────────────────────
+
+const initPromise = (async () => {
+  await sodium.ready
+  await sweepStale()
+})()
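The worker above answers each request by id, so the main thread can await replies as ordinary promises. An illustrative round trip through the crypto-backend.ts wrapper (a sketch using only the APIs added in this diff, not part of the change itself; the upload of each chunk is elided):

// sketch only: createCryptoBackend() is defined in crypto-backend.ts above
const backend = createCryptoBackend()
const data = new Uint8Array(1024 * 1024)
crypto.getRandomValues(data)
// 'encrypt' writes the encrypted file into the worker's OPFS session dir...
const {chunkSizes} = await backend.encrypt(data, 'photo.jpg')
// ...and 'readChunk' streams it back slice by slice for upload
let offset = 0
for (const size of chunkSizes) {
  const chunk = await backend.readChunk(offset, size) // chunk would be uploaded here
  offset += size
}
await backend.cleanup() // removes the session dir and terminates the worker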
diff --git a/xftp-web/web/download.ts b/xftp-web/web/download.ts
new file mode 100644
index 0000000000..74c50bd7c4
--- /dev/null
+++ b/xftp-web/web/download.ts
@@ -0,0 +1,137 @@
+import {createCryptoBackend} from './crypto-backend.js'
+import {createProgressRing} from './progress.js'
+import {
+  newXFTPAgent, closeXFTPAgent,
+  decodeDescriptionURI, downloadFileRaw, ackFileChunks
+} from '../src/agent.js'
+
+export function initDownload(app: HTMLElement, hash: string) {
+  let fd: ReturnType<typeof decodeDescriptionURI>
+  try {
+    fd = decodeDescriptionURI(hash)
+  } catch (err: any) {
+    app.innerHTML = `
+      <div class="card">
+        <p class="error">Invalid or corrupted link.</p>
+      </div>`
+    return
+  }
+
+  const size = fd.redirect ? fd.redirect.size : fd.size
+  app.innerHTML = `
+    <div class="card">
+      <h1>SimpleX File Transfer</h1>
+      <div id="dl-ready" class="stage">
+        <p>Encrypted file, ${formatSize(size)}</p>
+        <button id="dl-btn" class="btn">Download</button>
+      </div>
+      <div id="dl-progress" class="stage" hidden>
+        <div id="dl-progress-container"></div>
+        <p id="dl-status"></p>
+      </div>
+      <div id="dl-error" class="stage" hidden>
+        <p id="dl-error-msg" class="error"></p>
+        <button id="dl-retry-btn" class="btn btn-secondary">Retry</button>
+      </div>
+    </div>`
+
+  const readyStage = document.getElementById('dl-ready')!
+  const progressStage = document.getElementById('dl-progress')!
+  const errorStage = document.getElementById('dl-error')!
+  const progressContainer = document.getElementById('dl-progress-container')!
+  const statusText = document.getElementById('dl-status')!
+  const dlBtn = document.getElementById('dl-btn')!
+  const errorMsg = document.getElementById('dl-error-msg')!
+  const retryBtn = document.getElementById('dl-retry-btn')!
+
+  function showStage(stage: HTMLElement) {
+    for (const s of [readyStage, progressStage, errorStage]) s.hidden = true
+    stage.hidden = false
+  }
+
+  function showError(msg: string) {
+    errorMsg.textContent = msg
+    showStage(errorStage)
+  }
+
+  dlBtn.addEventListener('click', startDownload)
+  retryBtn.addEventListener('click', startDownload)
+
+  async function startDownload() {
+    showStage(progressStage)
+    const ring = createProgressRing()
+    progressContainer.innerHTML = ''
+    progressContainer.appendChild(ring.canvas)
+    statusText.textContent = 'Downloading…'
+
+    const backend = createCryptoBackend()
+    const agent = newXFTPAgent()
+
+    try {
+      const resolvedFd = await downloadFileRaw(agent, fd, async (raw) => {
+        await backend.decryptAndStoreChunk(
+          raw.dhSecret, raw.nonce, raw.body, raw.digest, raw.chunkNo
+        )
+      }, {
+        onProgress: (downloaded, total) => {
+          ring.update(downloaded / total * 0.8)
+        },
+        concurrency: 3
+      })
+
+      statusText.textContent = 'Decrypting…'
+      ring.update(0.85)
+
+      const {header, content} = await backend.verifyAndDecrypt({
+        size: resolvedFd.size,
+        digest: resolvedFd.digest,
+        key: resolvedFd.key,
+        nonce: resolvedFd.nonce
+      })
+
+      ring.update(0.95)
+
+      // ACK (best-effort)
+      ackFileChunks(agent, resolvedFd).catch(() => {})
+
+      // Sanitize filename and trigger browser save
+      const fileName = sanitizeFileName(header.fileName)
+      const blob = new Blob([content.buffer as ArrayBuffer])
+      const url = URL.createObjectURL(blob)
+      const a = document.createElement('a')
+      a.href = url
+      a.download = fileName
+      a.click()
+      URL.revokeObjectURL(url)
+
+      ring.update(1)
+      statusText.textContent = 'Download complete'
+    } catch (err: any) {
+      showError(err?.message ?? String(err))
+    } finally {
+      await backend.cleanup().catch(() => {})
+      closeXFTPAgent(agent)
+    }
+  }
+}
+
+function sanitizeFileName(name: string): string {
+  let s = name
+  // Strip path separators
+  s = s.replace(/[/\\]/g, '')
+  // Replace null/control characters
+  s = s.replace(/[\x00-\x1f\x7f]/g, '_')
+  // Strip Unicode bidi override characters
+  s = s.replace(/[\u202a-\u202e\u2066-\u2069]/g, '')
+  // Limit length
+  if (s.length > 255) s = s.slice(0, 255)
+  return s || 'download'
+}
+
+function formatSize(bytes: number): string {
+  if (bytes < 1024) return bytes + ' B'
+  if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB'
+  return (bytes / (1024 * 1024)).toFixed(1) + ' MB'
+}
diff --git a/xftp-web/web/index.html b/xftp-web/web/index.html
new file mode 100644
index 0000000000..86dfc0afb5
--- /dev/null
+++ b/xftp-web/web/index.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8" />
+  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+  <meta http-equiv="Content-Security-Policy"
+    content="default-src 'none'; script-src 'self'; style-src 'self'; worker-src 'self'; connect-src __CSP_CONNECT_SRC__" />
+  <title>SimpleX File Transfer</title>
+  <link rel="stylesheet" href="./style.css" />
+</head>
+<body>
+  <div id="app"></div>
+  <script type="module" src="./main.ts"></script>
+</body>
+</html>
diff --git a/xftp-web/web/main.ts b/xftp-web/web/main.ts
new file mode 100644
index 0000000000..afb8b91d5d
--- /dev/null
+++ b/xftp-web/web/main.ts
@@ -0,0 +1,24 @@
+import sodium from 'libsodium-wrappers-sumo'
+import {initUpload} from './upload.js'
+import {initDownload} from './download.js'
+
+async function main() {
+  await sodium.ready
+
+  const app = document.getElementById('app')!
+  const hash = window.location.hash.slice(1)
+
+  if (hash) {
+    initDownload(app, hash)
+  } else {
+    initUpload(app)
+  }
+}
+
+main().catch(err => {
+  const app = document.getElementById('app')
+  if (app) {
+    app.innerHTML = `
+      <div class="card">
+        <p class="error">Failed to initialize: ${err.message}</p>
+      </div>`
+  }
+  console.error(err)
+})
diff --git a/xftp-web/web/progress.ts b/xftp-web/web/progress.ts
new file mode 100644
index 0000000000..2fa292f27e
--- /dev/null
+++ b/xftp-web/web/progress.ts
@@ -0,0 +1,52 @@
+const SIZE = 120
+const LINE_WIDTH = 8
+const RADIUS = (SIZE - LINE_WIDTH) / 2
+const CENTER = SIZE / 2
+const BG_COLOR = '#e0e0e0'
+const FG_COLOR = '#3b82f6'
+
+export interface ProgressRing {
+  canvas: HTMLCanvasElement
+  update(fraction: number): void
+}
+
+export function createProgressRing(): ProgressRing {
+  const canvas = document.createElement('canvas')
+  canvas.width = SIZE * devicePixelRatio
+  canvas.height = SIZE * devicePixelRatio
+  canvas.style.width = SIZE + 'px'
+  canvas.style.height = SIZE + 'px'
+  canvas.className = 'progress-ring'
+  const ctx = canvas.getContext('2d')!
+  ctx.scale(devicePixelRatio, devicePixelRatio)
+
+  function draw(fraction: number) {
+    ctx.clearRect(0, 0, SIZE, SIZE)
+    // Background arc
+    ctx.beginPath()
+    ctx.arc(CENTER, CENTER, RADIUS, 0, 2 * Math.PI)
+    ctx.strokeStyle = BG_COLOR
+    ctx.lineWidth = LINE_WIDTH
+    ctx.lineCap = 'round'
+    ctx.stroke()
+    // Foreground arc
+    if (fraction > 0) {
+      ctx.beginPath()
+      ctx.arc(CENTER, CENTER, RADIUS, -Math.PI / 2, -Math.PI / 2 + 2 * Math.PI * fraction)
+      ctx.strokeStyle = FG_COLOR
+      ctx.lineWidth = LINE_WIDTH
+      ctx.lineCap = 'round'
+      ctx.stroke()
+    }
+    // Percentage text
+    const pct = Math.round(fraction * 100)
+    ctx.fillStyle = '#333'
+    ctx.font = '600 20px system-ui, sans-serif'
+    ctx.textAlign = 'center'
+    ctx.textBaseline = 'middle'
+    ctx.fillText(pct + '%', CENTER, CENTER)
+  }
+
+  draw(0)
+  return {canvas, update: draw}
+}
diff --git a/xftp-web/web/servers.json b/xftp-web/web/servers.json
new file mode 100644
index 0000000000..334fa57835
--- /dev/null
+++ b/xftp-web/web/servers.json
@@ -0,0 +1,18 @@
+{
+  "simplex": [
+    "xftp://da1aH3nOT-9G8lV7bWamhxpDYdJ1xmW7j3JpGaDR5Ug=@xftp1.simplex.im",
+    "xftp://5vog2Imy1ExJB_7zDZrkV1KDWi96jYFyy9CL6fndBVw=@xftp2.simplex.im",
+    "xftp://PYa32DdYNFWi0uZZOprWQoQpIk5qyjRJ3EF7bVpbsn8=@xftp3.simplex.im",
+    "xftp://k_GgQl40UZVV0Y4BX9ZTyMVqX5ZewcLW0waQIl7AYDE=@xftp4.simplex.im",
+    "xftp://-bIo6o8wuVc4wpZkZD3tH-rCeYaeER_0lz1ffQcSJDs=@xftp5.simplex.im",
+    "xftp://6nSvtY9pJn6PXWTAIMNl95E1Kk1vD7FM2TeOA64CFLg=@xftp6.simplex.im"
+  ],
+  "flux": [
+    "xftp://92Sctlc09vHl_nAqF2min88zKyjdYJ9mgxRCJns5K2U=@xftp1.simplexonflux.com",
+    "xftp://YBXy4f5zU1CEhnbbCzVWTNVNsaETcAGmYqGNxHntiE8=@xftp2.simplexonflux.com",
+    "xftp://ARQO74ZSvv2OrulRF3CdgwPz_AMy27r0phtLSq5b664=@xftp3.simplexonflux.com",
+    "xftp://ub2jmAa9U0uQCy90O-fSUNaYCj6sdhl49Jh3VpNXP58=@xftp4.simplexonflux.com",
+    "xftp://Rh19D5e4Eez37DEE9hAlXDB3gZa1BdFYJTPgJWPO9OI=@xftp5.simplexonflux.com",
+    "xftp://0AznwoyfX8Od9T_acp1QeeKtxUi676IBIiQjXVwbdyU=@xftp6.simplexonflux.com"
+  ]
+}
diff --git a/xftp-web/web/servers.ts b/xftp-web/web/servers.ts
new file mode 100644
index 0000000000..0c9c8b5855
--- /dev/null
+++ b/xftp-web/web/servers.ts
@@ -0,0 +1,16 @@
+import {parseXFTPServer, type XFTPServer} from '../src/protocol/address.js'
+import presets from './servers.json'
+
+declare const __XFTP_SERVERS__: string[]
+
+const serverAddresses: string[] = typeof __XFTP_SERVERS__ !== 'undefined'
+  ? __XFTP_SERVERS__
+  : [...presets.simplex, ...presets.flux]
+
+export function getServers(): XFTPServer[] {
+  return serverAddresses.map(parseXFTPServer)
+}
+
+export function pickRandomServer(servers: XFTPServer[]): XFTPServer {
+  return servers[Math.floor(Math.random() * servers.length)]
+}
diff --git a/xftp-web/web/style.css b/xftp-web/web/style.css
new file mode 100644
index 0000000000..3c5654a0e9
--- /dev/null
+++ b/xftp-web/web/style.css
@@ -0,0 +1,103 @@
+*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
+
+body {
+  font-family: system-ui, -apple-system, sans-serif;
+  background: #f5f5f5;
+  color: #333;
+  min-height: 100vh;
+  display: flex;
+  align-items: center;
+  justify-content: center;
+}
+
+#app {
+  width: 100%;
+  max-width: 480px;
+  padding: 16px;
+}
+
+.card {
+  background: #fff;
+  border-radius: 12px;
+  padding: 32px 24px;
+  box-shadow: 0 1px 3px rgba(0,0,0,.1);
+  text-align: center;
+}
+
+h1 {
+  font-size: 1.25rem;
+  font-weight: 600;
+  margin-bottom: 24px;
+}
+
+.stage { margin-top: 16px; }
+
+/* Drop zone */
+.drop-zone {
+  border: 2px dashed #ccc;
+  border-radius: 8px;
+  padding: 32px 16px;
+  transition: border-color .15s, background .15s;
+}
+.drop-zone.drag-over {
+  border-color: #3b82f6;
+  background: #eff6ff;
+}
+
+/* Buttons */
+.btn {
+  display: inline-block;
+  padding: 10px 24px;
+  border: none;
+  border-radius: 6px;
+  background: #3b82f6;
+  color: #fff;
+  font-size: .9rem;
+  font-weight: 500;
+  cursor: pointer;
+  transition: background .15s;
+}
+.btn:hover { background: #2563eb; }
+.btn-secondary { background: #6b7280; }
+.btn-secondary:hover { background: #4b5563; }
+
+/* Hints */
+.hint { color: #999; font-size: .85rem; margin-top: 8px; }
+.expiry { margin-top: 12px; }
+
+/* Progress */
+.progress-ring { display: block; margin: 0 auto 12px; }
+#upload-status, #dl-status { font-size: .9rem; color: #666; margin-bottom: 12px; }
+
+/* Share link row */
+.link-row {
+  display: flex;
+  gap: 8px;
+  margin-top: 12px;
+}
+.link-row input {
+  flex: 1;
+  padding: 8px 10px;
+  border: 1px solid #ccc;
+  border-radius: 6px;
+  font-size: .85rem;
+  background: #f9fafb;
+}
+
+/* Messages */
+.success { color: #16a34a; font-weight: 600; }
+.error { color: #dc2626; font-weight: 500; margin-bottom: 12px; }
+
+/* Security note */
+.security-note {
+  margin-top: 20px;
+  padding: 12px;
+  background: #f0fdf4;
+  border-radius: 6px;
+  font-size: .8rem;
+  color: #555;
+  text-align: left;
+}
+.security-note p + p { margin-top: 6px; }
+.security-note a { color: #3b82f6; text-decoration: none; }
+.security-note a:hover { text-decoration: underline; }
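The upload page below wires these pieces together. For orientation, the progress ring from progress.ts is consumed like this (illustrative sketch only; `container` is any hypothetical host element):

// sketch: createProgressRing is exported from progress.ts above
const ring = createProgressRing()
container.appendChild(ring.canvas) // 120x120 canvas, DPI-scaled
ring.update(0.3)                   // redraws the arc and the "30%" label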
diff --git a/xftp-web/web/upload.ts b/xftp-web/web/upload.ts
new file mode 100644
index 0000000000..b8b05bc896
--- /dev/null
+++ b/xftp-web/web/upload.ts
@@ -0,0 +1,164 @@
+import {createCryptoBackend} from './crypto-backend.js'
+import {getServers, pickRandomServer} from './servers.js'
+import {createProgressRing} from './progress.js'
+import {
+  newXFTPAgent, closeXFTPAgent, uploadFile, encodeDescriptionURI,
+  type EncryptedFileMetadata
+} from '../src/agent.js'
+
+const MAX_SIZE = 100 * 1024 * 1024
+
+export function initUpload(app: HTMLElement) {
+  app.innerHTML = `
+    <div class="card">
+      <h1>SimpleX File Transfer</h1>
+      <div id="drop-zone" class="drop-zone">
+        <p>Drag & drop a file here</p>
+        <p class="hint">or</p>
+        <label for="file-input" class="btn">Choose file</label>
+        <input id="file-input" type="file" hidden />
+        <p class="hint">Max 100 MB</p>
+      </div>
+      <div id="upload-progress" class="stage" hidden>
+        <div id="progress-container"></div>
+        <p id="upload-status"></p>
+        <button id="cancel-btn" class="btn btn-secondary">Cancel</button>
+      </div>
+      <div id="upload-complete" class="stage" hidden>
+        <p class="success">Upload complete</p>
+        <div class="link-row">
+          <input id="share-link" data-testid="share-link" readonly />
+          <button id="copy-btn" class="btn">Copy</button>
+        </div>
+      </div>
+      <div id="upload-error" class="stage" hidden>
+        <p id="error-msg" class="error"></p>
+        <button id="retry-btn" class="btn btn-secondary">Retry</button>
+      </div>
+    </div>`
+
+  const dropZone = document.getElementById('drop-zone')!
+  const fileInput = document.getElementById('file-input') as HTMLInputElement
+  const progressStage = document.getElementById('upload-progress')!
+  const completeStage = document.getElementById('upload-complete')!
+  const errorStage = document.getElementById('upload-error')!
+  const progressContainer = document.getElementById('progress-container')!
+  const statusText = document.getElementById('upload-status')!
+  const cancelBtn = document.getElementById('cancel-btn')!
+  const shareLink = document.getElementById('share-link') as HTMLInputElement
+  const copyBtn = document.getElementById('copy-btn')!
+  const errorMsg = document.getElementById('error-msg')!
+  const retryBtn = document.getElementById('retry-btn')!
+
+  let aborted = false
+  let pendingFile: File | null = null
+
+  dropZone.addEventListener('dragover', e => { e.preventDefault(); dropZone.classList.add('drag-over') })
+  dropZone.addEventListener('dragleave', () => dropZone.classList.remove('drag-over'))
+  dropZone.addEventListener('drop', e => {
+    e.preventDefault()
+    dropZone.classList.remove('drag-over')
+    const f = e.dataTransfer?.files[0]
+    if (f) startUpload(f)
+  })
+  fileInput.addEventListener('change', () => {
+    if (fileInput.files?.[0]) startUpload(fileInput.files[0])
+  })
+  retryBtn.addEventListener('click', () => {
+    if (pendingFile) startUpload(pendingFile)
+  })
+
+  function showStage(stage: HTMLElement) {
+    for (const s of [dropZone, progressStage, completeStage, errorStage]) s.hidden = true
+    stage.hidden = false
+  }
+
+  function showError(msg: string) {
+    errorMsg.textContent = msg
+    showStage(errorStage)
+  }
+
+  async function startUpload(file: File) {
+    pendingFile = file
+    aborted = false
+
+    if (file.size > MAX_SIZE) {
+      showError(`File too large (${formatSize(file.size)}). Maximum is 100 MB.`)
+      return
+    }
+    if (file.size === 0) {
+      showError('File is empty.')
+      return
+    }
+
+    showStage(progressStage)
+    const ring = createProgressRing()
+    progressContainer.innerHTML = ''
+    progressContainer.appendChild(ring.canvas)
+    statusText.textContent = 'Encrypting…'
+
+    const backend = createCryptoBackend()
+    const agent = newXFTPAgent()
+
+    cancelBtn.onclick = () => {
+      aborted = true
+      backend.cleanup().catch(() => {})
+      closeXFTPAgent(agent)
+      showStage(dropZone)
+    }
+
+    try {
+      const fileData = new Uint8Array(await file.arrayBuffer())
+      if (aborted) return
+
+      const encrypted = await backend.encrypt(fileData, file.name, (done, total) => {
+        ring.update(done / total * 0.3)
+      })
+      if (aborted) return
+
+      statusText.textContent = 'Uploading…'
+      const metadata: EncryptedFileMetadata = {
+        digest: encrypted.digest,
+        key: encrypted.key,
+        nonce: encrypted.nonce,
+        chunkSizes: encrypted.chunkSizes
+      }
+      const servers = getServers()
+      const server = pickRandomServer(servers)
+      const result = await uploadFile(agent, server, metadata, {
+        readChunk: (off, sz) => backend.readChunk(off, sz),
+        onProgress: (uploaded, total) => {
+          ring.update(0.3 + (uploaded / total) * 0.7)
+        }
+      })
+      if (aborted) return
+
+      const url = window.location.origin + window.location.pathname + '#' + result.uri
+      shareLink.value = url
+      showStage(completeStage)
+      copyBtn.onclick = () => {
+        navigator.clipboard.writeText(url).then(() => {
+          copyBtn.textContent = 'Copied!'
+          setTimeout(() => { copyBtn.textContent = 'Copy' }, 2000)
+        })
+      }
+    } catch (err: any) {
+      if (!aborted) showError(err?.message ?? String(err))
+    } finally {
+      await backend.cleanup().catch(() => {})
+      closeXFTPAgent(agent)
+    }
+  }
+}
+
+function formatSize(bytes: number): string {
+  if (bytes < 1024) return bytes + ' B'
+  if (bytes < 1024 * 1024) return (bytes / 1024).toFixed(1) + ' KB'
+  return (bytes / (1024 * 1024)).toFixed(1) + ' MB'
+}
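A quick sketch of the sanitizeFileName guarantees the download page relies on (hypothetical vitest cases: the function is module-private in download.ts today, so it would need to be exported, and the import path is assumed):

import {test, expect} from 'vitest'
// hypothetical import: sanitizeFileName is not currently exported
import {sanitizeFileName} from '../web/download.js'

test('sanitizeFileName strips unsafe characters', () => {
  expect(sanitizeFileName('../../etc/passwd')).toBe('....etcpasswd') // path separators removed
  expect(sanitizeFileName('a\u202eexe.txt')).toBe('aexe.txt')        // bidi override stripped
  expect(sanitizeFileName('')).toBe('download')                      // empty falls back
})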