diff --git a/.vscode/launch.json b/.vscode/launch.json index 23205ab1f13f7..b005b8adf3aca 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1192,4 +1192,4 @@ "description": "Usage: bun test [...]", }, ], -} +} \ No newline at end of file diff --git a/build.zig b/build.zig index 776585f3e896d..bc92dc8ac7767 100644 --- a/build.zig +++ b/build.zig @@ -157,7 +157,7 @@ pub fn build(b: *Build) !void { // TODO: Upgrade path for 0.14.0 // b.graph.zig_lib_directory = brk: { - // const sub_path = "src/deps/zig/lib"; + // const sub_path = "vendor/zig/lib"; // const dir = try b.build_root.handle.openDir(sub_path, .{}); // break :brk .{ .handle = dir, .path = try b.build_root.join(b.graph.arena, &.{sub_path}) }; // }; @@ -295,7 +295,7 @@ pub fn build(b: *Build) !void { bun_check_obj.generated_bin = null; step.dependOn(&bun_check_obj.step); - // The default install step will run zig build check This is so ZLS + // The default install step will run zig build check. This is so ZLS // identifies the codebase, as well as performs checking if build on // save is enabled. diff --git a/packages/bun-darwin-aarch64/.npmignore b/packages/bun-darwin-aarch64/.npmignore deleted file mode 100644 index 08d23cb2a7b5a..0000000000000 --- a/packages/bun-darwin-aarch64/.npmignore +++ /dev/null @@ -1,4 +0,0 @@ -bin/bun-profile -bin/*.o -*.o -*.a \ No newline at end of file diff --git a/packages/bun-linux-x64/.npmignore b/packages/bun-linux-x64/.npmignore deleted file mode 100644 index 08d23cb2a7b5a..0000000000000 --- a/packages/bun-linux-x64/.npmignore +++ /dev/null @@ -1,4 +0,0 @@ -bin/bun-profile -bin/*.o -*.o -*.a \ No newline at end of file diff --git a/packages/bun-plugin-css/README.md b/packages/bun-plugin-css/README.md deleted file mode 100644 index 9abf5f6c0eb89..0000000000000 --- a/packages/bun-plugin-css/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `bun-plugin-css` - -Not implemented. diff --git a/packages/bun-plugin-css/index.ts b/packages/bun-plugin-css/index.ts deleted file mode 100644 index bf67f6e7e0e1c..0000000000000 --- a/packages/bun-plugin-css/index.ts +++ /dev/null @@ -1 +0,0 @@ -throw new Error("Not implemented."); diff --git a/packages/bun-plugin-css/package.json b/packages/bun-plugin-css/package.json deleted file mode 100644 index 68b4020d90a43..0000000000000 --- a/packages/bun-plugin-css/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "bun-plugin-css", - "version": "0.0.1-alpha.0", - "module": "index.ts", - "type": "module", - "files": [ - "index.ts", - "package.json" - ] -} diff --git a/packages/bun-plugin-lightningcss/README.md b/packages/bun-plugin-lightningcss/README.md deleted file mode 100644 index 7cbe8c64b320c..0000000000000 --- a/packages/bun-plugin-lightningcss/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `bun-plugin-lightningcss` - -Not implemented. 
diff --git a/packages/bun-plugin-lightningcss/index.ts b/packages/bun-plugin-lightningcss/index.ts deleted file mode 100644 index bf67f6e7e0e1c..0000000000000 --- a/packages/bun-plugin-lightningcss/index.ts +++ /dev/null @@ -1 +0,0 @@ -throw new Error("Not implemented."); diff --git a/packages/bun-plugin-lightningcss/package.json b/packages/bun-plugin-lightningcss/package.json deleted file mode 100644 index 6aafdedd1bd26..0000000000000 --- a/packages/bun-plugin-lightningcss/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "bun-plugin-lightningcss", - "version": "0.0.1-alpha.0", - "module": "index.ts", - "type": "module", - "files": [ - "index.ts", - "package.json" - ] -} diff --git a/packages/bun-plugin-mdx/README.md b/packages/bun-plugin-mdx/README.md deleted file mode 100644 index 65890d7c24e22..0000000000000 --- a/packages/bun-plugin-mdx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `bun-plugin-mdx` - -Not implemented. diff --git a/packages/bun-plugin-mdx/index.ts b/packages/bun-plugin-mdx/index.ts deleted file mode 100644 index bf67f6e7e0e1c..0000000000000 --- a/packages/bun-plugin-mdx/index.ts +++ /dev/null @@ -1 +0,0 @@ -throw new Error("Not implemented."); diff --git a/packages/bun-plugin-mdx/package.json b/packages/bun-plugin-mdx/package.json deleted file mode 100644 index 98047872f2890..0000000000000 --- a/packages/bun-plugin-mdx/package.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "name": "bun-plugin-mdx", - "version": "0.0.1-alpha.0", - "module": "index.ts", - "type": "module", - "files": [ - "index.ts", - "package.json" - ] -} diff --git a/packages/bun-plugin-server-components/.gitignore b/packages/bun-plugin-server-components/.gitignore deleted file mode 100644 index f81d56eaa35f6..0000000000000 --- a/packages/bun-plugin-server-components/.gitignore +++ /dev/null @@ -1,169 +0,0 @@ -# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore - -# Logs - -logs -_.log -npm-debug.log_ -yarn-debug.log* -yarn-error.log* -lerna-debug.log* -.pnpm-debug.log* - -# Diagnostic reports (https://nodejs.org/api/report.html) - -report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json - -# Runtime data - -pids -_.pid -_.seed -\*.pid.lock - -# Directory for instrumented libs generated by jscoverage/JSCover - -lib-cov - -# Coverage directory used by tools like istanbul - -coverage -\*.lcov - -# nyc test coverage - -.nyc_output - -# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) - -.grunt - -# Bower dependency directory (https://bower.io/) - -bower_components - -# node-waf configuration - -.lock-wscript - -# Compiled binary addons (https://nodejs.org/api/addons.html) - -build/Release - -# Dependency directories - -node_modules/ -jspm_packages/ - -# Snowpack dependency directory (https://snowpack.dev/) - -web_modules/ - -# TypeScript cache - -\*.tsbuildinfo - -# Optional npm cache directory - -.npm - -# Optional eslint cache - -.eslintcache - -# Optional stylelint cache - -.stylelintcache - -# Microbundle cache - -.rpt2_cache/ -.rts2_cache_cjs/ -.rts2_cache_es/ -.rts2_cache_umd/ - -# Optional REPL history - -.node_repl_history - -# Output of 'npm pack' - -\*.tgz - -# Yarn Integrity file - -.yarn-integrity - -# dotenv environment variable files - -.env -.env.development.local -.env.test.local -.env.production.local -.env.local - -# parcel-bundler cache (https://parceljs.org/) - -.cache -.parcel-cache - -# Next.js build output - -.next -out - -# Nuxt.js build / generate output - -.nuxt -dist - -# Gatsby files - -.cache/ - -# Comment in the public line in 
if your project uses Gatsby and not Next.js - -# https://nextjs.org/blog/next-9-1#public-directory-support - -# public - -# vuepress build output - -.vuepress/dist - -# vuepress v2.x temp and cache directory - -.temp -.cache - -# Docusaurus cache and generated files - -.docusaurus - -# Serverless directories - -.serverless/ - -# FuseBox cache - -.fusebox/ - -# DynamoDB Local files - -.dynamodb/ - -# TernJS port file - -.tern-port - -# Stores VSCode versions used for testing VSCode extensions - -.vscode-test - -# yarn v2 - -.yarn/cache -.yarn/unplugged -.yarn/build-state.yml -.yarn/install-state.gz -.pnp.\* diff --git a/packages/bun-plugin-server-components/README.md b/packages/bun-plugin-server-components/README.md deleted file mode 100644 index 32ce31fede914..0000000000000 --- a/packages/bun-plugin-server-components/README.md +++ /dev/null @@ -1,149 +0,0 @@ -# `bun-plugin-server-components` - -The official Bun plugin for **server components**. - -## Installation - -```sh -bun add bun-plugin-server-components -d ``` - -## Context - -Server components are a new abstraction for building web applications. They look similar to standard React/JSX components, but render exclusively on the server. They differ from classic "client components" in a few ways: - -1. They can be `async`. -2. Their implementation can run privileged code like database queries. Normally this would be unsafe, because the source code of client components is typically bundled and sent to the client, where it can be inspected and reverse-engineered. Server components are never sent to the client, so they can run privileged code safely. -3. They _cannot_ contain stateful hooks like `useState` or `useEffect`. - -Server components require a deep integration with the bundler to work. To understand why, we need a bit of background on how server components work. - -### How server components work - -Imagine you have a server component that looks like this: - -```tsx -// index.tsx -import { Component } from "./Component"; -export default async function HomePage() { - return ( -
<Component />
- ); -} -``` - -This file imports a client component called `Component`. - -```ts -// ./Component.tsx -"use client"; - -export function Component() { - return
<div>Hello world</div>;
-} -``` - -This file imports a client component called `Component`. - -```ts -// ./Component.tsx -"use client"; - -export function Component() { - return <div>Hello world</div>; -} -``` - -To run this component we need to generate two builds. - -> Here the term "build" refers to a typical bundling step—the act of converting a set of entrypoints into a set of bundles. - -1. The first is our "server component build". It contains all the code we need to render `HomePage` to a component tree. When a `Request` comes in, we can use React's built-in tools to convert this tree into a "virtual DOM stream" that we can return as a `Response`. -2. The second is our "client build". It contains the bundled versions of all client components that were referenced by our server components. - -The browser hits the server and gets back the "virtual DOM stream". The virtual DOM stream will contain references to client components, which will be loaded from the client bundle. React provides a built-in utility (`createFromFetch`) that accepts the VDOM stream, dynamically loads the necessary client components, and returns a renderable component. - -```ts -import { createRoot } from "react-dom/client"; -import { createFromFetch } from "react-server-dom-webpack/client.browser"; - -const stream = fetch("/", { headers: { Accept: "text/x-component" } }); -const data = createFromFetch(stream); - -const root = createRoot(document); -root.render(data); -``` - -### Server-side rendering - -One potentially confusing aspect of server components is that they "return" virtual DOM. From the perspective of a server component, client components are black boxes. - -If we want to do server-side rendering, we need to render our server component to VDOM, _then_ render the VDOM to plain HTML. These are two distinct steps. The second step requires a _third build_, which we'll call the "SSR build". Like the "client build", this build will bundle all the client components. Unlike the "client build", those bundles will be intended for consumption on the server; in bundler terms, the build's `"target"` will be `"bun"` (or perhaps `"node"`). A sketch of this two-step flow appears just before the usage example below. - -### Bundling server components - -That's a high-level overview of how server components work. The important takeaway is that we need to generate totally separate bundles for server and client components. - -But it's not just a simple matter of running two separate bundling scripts. The true "entrypoints" of our application are the server components. Over the course of bundling our server components, we will discover some files containing the `"use client"` directive; these files then become the entrypoints for our "client build", which will require a totally separate build configuration from the server build. - -The goal of this plugin is to hide the complexity of this multi-stage build from the user. - -## Usage - -To use this plugin: - -```ts -import ServerComponentsPlugin from "bun-plugin-server-components"; - -await Bun.build({ - entrypoints: ["./index.tsx"], // server component files - plugins: [ - ServerComponentsPlugin({ - // plugin configuration - }), - ], - // other configuration -}); -``` - -The `"entrypoints"` you pass into `Bun.build()` should be your _server components_. Bun's bundler will automatically detect any files containing the `"use client"` directive, and will use those files as entrypoints for the "client build" and "SSR build". The bundler configuration for these builds can be provided via the `client` and `ssr` keys, respectively.
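To make the two-step "render to VDOM, then render to HTML" flow concrete, here is a rough sketch of a request handler. It is illustrative only: the `react-server-dom-webpack` entry points (`server.edge`, `client.edge`), the `clientManifest`/`ssrManifest` shapes, and the `handle` function are assumptions that vary by React version, and in a real app step 1 runs in the server-components bundle while step 2 runs in the SSR bundle, not in one file.

```tsx
import { renderToReadableStream as renderToVDOM } from "react-server-dom-webpack/server.edge";
import { createFromReadableStream } from "react-server-dom-webpack/client.edge";
import { renderToReadableStream as renderToHTML } from "react-dom/server.edge";
import HomePage from "./index";

// Hypothetical manifests emitted by the client and SSR builds.
declare const clientManifest: Record<string, unknown>;
declare const ssrManifest: { moduleMap: unknown; moduleLoading: unknown };

export async function handle(req: Request): Promise<Response> {
  // Step 1: render the server component tree to a virtual DOM stream.
  const vdom = renderToVDOM(<HomePage />, clientManifest);

  // Client-side navigations consume the stream directly (see createFromFetch above).
  if (req.headers.get("Accept") === "text/x-component") {
    return new Response(vdom, { headers: { "Content-Type": "text/x-component" } });
  }

  // Step 2: reconstruct a React tree from the stream, then render it to HTML.
  const tree = await createFromReadableStream(vdom, { ssrManifest });
  return new Response(await renderToHTML(tree), { headers: { "Content-Type": "text/html" } });
}
```

With that context, the full plugin configuration looks like this: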
- -```ts -import ServerComponentsPlugin from "bun-plugin-server-components"; - -await Bun.build({ - entrypoints: ["./index.tsx"], // server component files - outdir: "./build", - manifest: true, - plugins: [ServerComponentsPlugin({ - client: { - entrypoints: [], // optional - additional client entrypoints - outdir: "./build/client", // default: inherits from the main build - target: "browser", - plugins: [/* */], - }, - ssr: { - entrypoints: [], // optional - additional SSR entrypoints - outdir: "./build/client", // default: inherits from the main build - target: "bun", // this is default - plugins: [/* */], - } - })], -}); -``` - -The result of `Bun.build()` will contain additional manifests for the SSR and client builds. - -```ts -const result = await Bun.build({ - // config - plugins: [ - ServerComponentsPlugin({ - /* config */ - }), - ], -}); - -// standard manifest -// for the top-level (server components) build -result.manifest; - -// manifest for client build -result.clientManifest; - -// manifest for SSR build -result.ssrManifest; -``` - -Once the build is complete, use the manifests to implement your RSC server. diff --git a/packages/bun-plugin-server-components/bun.lockb b/packages/bun-plugin-server-components/bun.lockb deleted file mode 100755 index 460954c699637..0000000000000 Binary files a/packages/bun-plugin-server-components/bun.lockb and /dev/null differ diff --git a/packages/bun-plugin-server-components/index.ts b/packages/bun-plugin-server-components/index.ts deleted file mode 100644 index 67d98eb769d2a..0000000000000 --- a/packages/bun-plugin-server-components/index.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { BunPlugin, BuildConfig } from "bun"; - -function Plugin(config: { client?: BuildConfig; ssr?: BuildConfig }): BunPlugin { - return { - name: "bun-plugin-server-components", - SECRET_SERVER_COMPONENTS_INTERNALS: config, - } as any; -} - -export default Plugin; diff --git a/packages/bun-plugin-server-components/package.json b/packages/bun-plugin-server-components/package.json deleted file mode 100644 index 9d28f36fd74c7..0000000000000 --- a/packages/bun-plugin-server-components/package.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "name": "bun-plugin-server-components", - "version": "0.0.1-alpha.0", - "module": "index.ts", - "type": "module", - "types": "index.ts", - "exports": { - ".": { - "import": "./index.ts", - "require": "./index.ts", - "default": "./index.js" - }, - "./package.json": "./package.json" - }, - "files": [ - "index.ts", - "tsconfig.json", - "package.json", - "modules.d.ts" - ], - "devDependencies": { - "@types/js-yaml": "^4.0.5" - }, - "dependencies": { - "bun-types": "canary", - "js-yaml": "^4.1.0" - } -} diff --git a/packages/bun-plugin-server-components/tsconfig.json b/packages/bun-plugin-server-components/tsconfig.json deleted file mode 100644 index a03219b2ebf8f..0000000000000 --- a/packages/bun-plugin-server-components/tsconfig.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "compilerOptions": { - "lib": ["ESNext"], - "module": "esnext", - "target": "esnext", - "moduleResolution": "bundler", - "moduleDetection": "force", - "allowImportingTsExtensions": true, - "noEmit": true, - "composite": true, - "strict": true, - "downlevelIteration": true, - "skipLibCheck": true, - "jsx": "react-jsx", - "allowSyntheticDefaultImports": true, - "forceConsistentCasingInFileNames": true, - "allowJs": true, - "types": [ - "bun-types" // add Bun global - ] - }, - "include": ["**/*.ts", "modules.d.ts"] -} diff --git a/src/Global.zig b/src/Global.zig index 
d3becfed78495..94b0bc70c3bdb 100644 --- a/src/Global.zig +++ b/src/Global.zig @@ -172,23 +172,16 @@ const string = bun.string; pub const BunInfo = struct { bun_version: string, platform: Analytics.GenerateHeader.GeneratePlatform.Platform, - framework: string = "", - framework_version: string = "", const Analytics = @import("./analytics/analytics_thread.zig"); const JSON = bun.JSON; const JSAst = bun.JSAst; - pub fn generate(comptime Bundler: type, bundler: Bundler, allocator: std.mem.Allocator) !JSAst.Expr { - var info = BunInfo{ + pub fn generate(comptime Bundler: type, _: Bundler, allocator: std.mem.Allocator) !JSAst.Expr { + const info = BunInfo{ .bun_version = Global.package_json_version, .platform = Analytics.GenerateHeader.GeneratePlatform.forOS(), }; - if (bundler.options.framework) |framework| { - info.framework = framework.package; - info.framework_version = framework.version; - } - return try JSON.toAST(allocator, BunInfo, info); } }; diff --git a/src/analytics/analytics_thread.zig b/src/analytics/analytics_thread.zig index 72bbadf6faf4b..5ea78ece20ebd 100644 --- a/src/analytics/analytics_thread.zig +++ b/src/analytics/analytics_thread.zig @@ -79,41 +79,44 @@ pub fn isCI() bool { /// This answers, "What parts of bun are people actually using?" pub const Features = struct { - /// Set right before JSC::initialize is called - pub var jsc: usize = 0; + pub var builtin_modules = std.enums.EnumSet(bun.JSC.HardcodedModule).initEmpty(); + pub var @"Bun.stderr": usize = 0; pub var @"Bun.stdin": usize = 0; pub var @"Bun.stdout": usize = 0; + pub var WebSocket: usize = 0; pub var abort_signal: usize = 0; + pub var binlinks: usize = 0; pub var bunfig: usize = 0; pub var define: usize = 0; pub var dotenv: usize = 0; pub var external: usize = 0; pub var extracted_packages: usize = 0; - /// Incremented for each call to `fetch` pub var fetch: usize = 0; - pub var filesystem_router: usize = 0; pub var git_dependencies: usize = 0; pub var html_rewriter: usize = 0; pub var http_server: usize = 0; pub var https_server: usize = 0; + /// Set right before JSC::initialize is called + pub var jsc: usize = 0; + /// Set when kit.DevServer is initialized + pub var kit_dev: usize = 0; pub var lifecycle_scripts: usize = 0; pub var loaders: usize = 0; pub var lockfile_migration_from_package_lock: usize = 0; pub var macros: usize = 0; + pub var no_avx2: usize = 0; + pub var no_avx: usize = 0; pub var shell: usize = 0; pub var spawn: usize = 0; + pub var standalone_executable: usize = 0; pub var standalone_shell: usize = 0; + /// Set when invoking a todo panic + pub var todo_panic: usize = 0; pub var transpiler_cache: usize = 0; - pub var tsconfig_paths: usize = 0; pub var tsconfig: usize = 0; + pub var tsconfig_paths: usize = 0; pub var virtual_modules: usize = 0; - pub var WebSocket: usize = 0; - pub var no_avx: usize = 0; - pub var no_avx2: usize = 0; - pub var binlinks: usize = 0; - pub var builtin_modules = std.enums.EnumSet(bun.JSC.HardcodedModule).initEmpty(); - pub var standalone_executable: usize = 0; pub var workers_spawned: usize = 0; pub var workers_terminated: usize = 0; diff --git a/src/api/schema.zig b/src/api/schema.zig index 10e25bb56169a..37fda386c517f 100644 --- a/src/api/schema.zig +++ b/src/api/schema.zig @@ -1671,12 +1671,6 @@ pub const Api = struct { /// extension_order extension_order: []const []const u8, - /// framework - framework: ?FrameworkConfig = null, - - /// router - router: ?RouteConfig = null, - /// no_summary no_summary: ?bool = null, diff --git a/src/ast/base.zig 
b/src/ast/base.zig index 26f3815e2455f..701557a426aaf 100644 --- a/src/ast/base.zig +++ b/src/ast/base.zig @@ -48,6 +48,9 @@ pub const Index = packed struct(u32) { pub const invalid = Index{ .value = std.math.maxInt(Int) }; pub const runtime = Index{ .value = 0 }; + pub const kit_server_data = Index{ .value = 1 }; + pub const kit_client_data = Index{ .value = 2 }; + pub const Int = u32; pub inline fn source(num: anytype) Index { @@ -229,6 +232,8 @@ pub const Ref = packed struct(u64) { *const std.ArrayList(js_ast.Symbol) => symbol_table.items, *std.ArrayList(js_ast.Symbol) => symbol_table.items, []js_ast.Symbol => symbol_table, + *js_ast.Symbol.Map => return symbol_table.get(ref) orelse + unreachable, // ref must exist within symbol table else => |T| @compileError("Unsupported type to Ref.getSymbol: " ++ @typeName(T)), }; return &resolved_symbol_table[ref.innerIndex()]; diff --git a/src/baby_list.zig b/src/baby_list.zig index d304f7549684e..baa07cc2de1cb 100644 --- a/src/baby_list.zig +++ b/src/baby_list.zig @@ -45,7 +45,7 @@ pub fn BabyList(comptime Type: type) type { return this.len > 0 and @intFromPtr(item.ptr) >= @intFromPtr(this.ptr) and @intFromPtr(item.ptr) < @intFromPtr(this.ptr) + this.len; } - pub inline fn initConst(items: []const Type) ListType { + pub fn initConst(items: []const Type) callconv(bun.callconv_inline) ListType { @setRuntimeSafety(false); return ListType{ // Remove the const qualifier from the items @@ -204,24 +204,24 @@ pub fn BabyList(comptime Type: type) type { }; } - pub inline fn first(this: ListType) ?*Type { + pub fn first(this: ListType) callconv(bun.callconv_inline) ?*Type { return if (this.len > 0) this.ptr[0] else @as(?*Type, null); } - pub inline fn last(this: ListType) ?*Type { + pub fn last(this: ListType) callconv(bun.callconv_inline) ?*Type { return if (this.len > 0) &this.ptr[this.len - 1] else @as(?*Type, null); } - pub inline fn first_(this: ListType) Type { + pub fn first_(this: ListType) callconv(bun.callconv_inline) Type { return this.ptr[0]; } - pub inline fn at(this: ListType, index: usize) *const Type { + pub fn at(this: ListType, index: usize) callconv(bun.callconv_inline) *const Type { bun.assert(index < this.len); return &this.ptr[index]; } - pub inline fn mut(this: ListType, index: usize) *Type { + pub fn mut(this: ListType, index: usize) callconv(bun.callconv_inline) *Type { bun.assert(index < this.len); return &this.ptr[index]; } @@ -236,7 +236,7 @@ pub fn BabyList(comptime Type: type) type { }; } - pub inline fn @"[0]"(this: ListType) Type { + pub fn @"[0]"(this: ListType) callconv(bun.callconv_inline) Type { return this.ptr[0]; } const OOM = error{OutOfMemory}; @@ -259,7 +259,7 @@ pub fn BabyList(comptime Type: type) type { this.update(list__); } - pub inline fn slice(this: ListType) []Type { + pub fn slice(this: ListType) callconv(bun.callconv_inline) []Type { @setRuntimeSafety(false); return this.ptr[0..this.len]; } @@ -273,6 +273,7 @@ pub fn BabyList(comptime Type: type) type { this.update(list_); return this.len - initial; } + pub fn writeLatin1(this: *@This(), allocator: std.mem.Allocator, str: []const u8) !u32 { if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); @@ -282,6 +283,7 @@ pub fn BabyList(comptime Type: type) type { this.update(new); return this.len - initial; } + pub fn writeUTF16(this: *@This(), allocator: std.mem.Allocator, str: []const u16) !u32 { if (comptime Type != u8) @compileError("Unsupported for type " ++ @typeName(Type)); diff --git a/src/bun.js/api/BunObject.zig 
b/src/bun.js/api/BunObject.zig index c22f173445302..21e729c6382b9 100644 --- a/src/bun.js/api/BunObject.zig +++ b/src/bun.js/api/BunObject.zig @@ -61,7 +61,6 @@ pub const BunObject = struct { pub const Glob = toJSGetter(Bun.getGlobConstructor); pub const Transpiler = toJSGetter(Bun.getTranspilerConstructor); pub const argv = toJSGetter(Bun.getArgv); - pub const assetPrefix = toJSGetter(Bun.getAssetPrefix); pub const cwd = toJSGetter(Bun.getCWD); pub const enableANSIColors = toJSGetter(Bun.enableANSIColors); pub const hash = toJSGetter(Bun.getHashObject); @@ -121,7 +120,6 @@ pub const BunObject = struct { @export(BunObject.Glob, .{ .name = getterName("Glob") }); @export(BunObject.Transpiler, .{ .name = getterName("Transpiler") }); @export(BunObject.argv, .{ .name = getterName("argv") }); - @export(BunObject.assetPrefix, .{ .name = getterName("assetPrefix") }); @export(BunObject.cwd, .{ .name = getterName("cwd") }); @export(BunObject.enableANSIColors, .{ .name = getterName("enableANSIColors") }); @export(BunObject.hash, .{ .name = getterName("hash") }); @@ -254,161 +252,13 @@ const zlib = @import("../../zlib.zig"); const Which = @import("../../which.zig"); const ErrorableString = JSC.ErrorableString; const is_bindgen = JSC.is_bindgen; -const max_addressible_memory = std.math.maxInt(u56); +const max_addressable_memory = std.math.maxInt(u56); const glob = @import("../../glob.zig"); const Async = bun.Async; const SemverObject = @import("../../install/semver.zig").SemverObject; const Braces = @import("../../shell/braces.zig"); const Shell = @import("../../shell/shell.zig"); -threadlocal var css_imports_list_strings: [512]ZigString = undefined; -threadlocal var css_imports_list: [512]Api.StringPointer = undefined; -threadlocal var css_imports_list_tail: u16 = 0; -threadlocal var css_imports_buf: std.ArrayList(u8) = undefined; -threadlocal var css_imports_buf_loaded: bool = false; - -threadlocal var routes_list_strings: [1024]ZigString = undefined; - -pub fn onImportCSS( - resolve_result: *const Resolver.Result, - import_record: *ImportRecord, - origin: URL, -) void { - if (!css_imports_buf_loaded) { - css_imports_buf = std.ArrayList(u8).initCapacity( - VirtualMachine.get().allocator, - import_record.path.text.len, - ) catch unreachable; - css_imports_buf_loaded = true; - } - - const writer = css_imports_buf.writer(); - const offset = css_imports_buf.items.len; - css_imports_list[css_imports_list_tail] = .{ - .offset = @as(u32, @truncate(offset)), - .length = 0, - }; - getPublicPath(resolve_result.path_pair.primary.text, origin, @TypeOf(writer), writer); - const length = css_imports_buf.items.len - offset; - css_imports_list[css_imports_list_tail].length = @as(u32, @truncate(length)); - css_imports_list_tail += 1; -} - -pub fn flushCSSImports() void { - if (css_imports_buf_loaded) { - css_imports_buf.clearRetainingCapacity(); - css_imports_list_tail = 0; - } -} - -pub fn getCSSImports() []ZigString { - const tail = css_imports_list_tail; - for (0..tail) |i| { - ZigString.fromStringPointer(css_imports_list[i], css_imports_buf.items, &css_imports_list_strings[i]); - } - return css_imports_list_strings[0..tail]; -} - -const ShellTask = struct { - arena: std.heap.Arena, - script: std.ArrayList(u8), - interpreter: Shell.InterpreterSync, - - pub const AsyncShellTask = JSC.ConcurrentPromiseTask(ShellTask); -}; - -pub fn shell( - globalThis: *JSC.JSGlobalObject, - callframe: *JSC.CallFrame, -) JSC.JSValue { - const Interpreter = @import("../../shell/interpreter.zig").Interpreter; - - // var allocator = 
globalThis.bunVM().allocator; - const allocator = getAllocator(globalThis); - var arena = bun.ArenaAllocator.init(allocator); - - const arguments_ = callframe.arguments(8); - var arguments = JSC.Node.ArgumentsSlice.init(globalThis.bunVM(), arguments_.slice()); - const string_args = arguments.nextEat() orelse { - globalThis.throw("shell: expected 2 arguments, got 0", .{}); - return .undefined; - }; - - const template_args_js = arguments.nextEat() orelse { - globalThis.throw("shell: expected 2 arguments, got 0", .{}); - return .undefined; - }; - var template_args = template_args_js.arrayIterator(globalThis); - var jsobjs = std.ArrayList(JSValue).init(arena.allocator()); - var script = std.ArrayList(u8).init(arena.allocator()); - - if (!(bun.shell.shellCmdFromJS(globalThis, string_args, &template_args, &jsobjs, &script) catch { - if (!globalThis.hasException()) - globalThis.throwOutOfMemory(); - return JSValue.undefined; - })) { - return .undefined; - } - - if (globalThis.hasException()) { - arena.deinit(); - return .undefined; - } - - const lex_result = brk: { - if (bun.strings.isAllASCII(script.items[0..])) { - var lexer = Shell.LexerAscii.new(arena.allocator(), script.items[0..]); - lexer.lex() catch |err| { - globalThis.throwError(err, "failed to lex shell"); - return JSValue.undefined; - }; - break :brk lexer.get_result(); - } - var lexer = Shell.LexerUnicode.new(arena.allocator(), script.items[0..]); - lexer.lex() catch |err| { - globalThis.throwError(err, "failed to lex shell"); - return JSValue.undefined; - }; - break :brk lexer.get_result(); - }; - - var parser = Shell.Parser.new(arena.allocator(), lex_result, jsobjs.items[0..]) catch |err| { - globalThis.throwError(err, "failed to create shell parser"); - return JSValue.undefined; - }; - - const script_ast = parser.parse() catch |err| { - globalThis.throwError(err, "failed to parse shell"); - return JSValue.undefined; - }; - - const script_heap = arena.allocator().create(Shell.AST.Script) catch { - globalThis.throwOutOfMemory(); - return JSValue.undefined; - }; - - script_heap.* = script_ast; - - const interpreter = Interpreter.init( - globalThis, - allocator, - &arena, - script_heap, - jsobjs.items[0..], - ) catch { - arena.deinit(); - return .false; - }; - _ = interpreter; // autofix - - // return interpreter; - return .undefined; - - // return interpreter.start(globalThis) catch { - // return .false; - // }; -} - pub fn shellEscape( globalThis: *JSC.JSGlobalObject, callframe: *JSC.CallFrame, @@ -902,13 +752,6 @@ pub fn getMain( return ZigString.init(vm.main).toJS(globalThis); } -pub fn getAssetPrefix( - globalThis: *JSC.JSGlobalObject, - _: *JSC.JSObject, -) JSC.JSValue { - return ZigString.init(VirtualMachine.get().bundler.options.routes.asset_prefix_path).toJS(globalThis); -} - pub fn getArgv( globalThis: *JSC.JSGlobalObject, _: *JSC.JSObject, @@ -994,7 +837,7 @@ pub fn getPublicPath(to: string, origin: URL, comptime Writer: type, writer: Wri to, VirtualMachine.get().bundler.fs.top_level_dir, origin, - VirtualMachine.get().bundler.options.routes.asset_prefix_path, + "", comptime Writer, writer, .loose, @@ -1286,22 +1129,6 @@ export fn Bun__resolveSyncWithSource( }; } -pub fn getPublicPathJS(globalObject: *JSC.JSGlobalObject, callframe: *JSC.CallFrame) JSC.JSValue { - const arguments = callframe.arguments(1).slice(); - if (arguments.len < 1) { - return bun.String.empty.toJS(globalObject); - } - var public_path_temp_str: bun.PathBuffer = undefined; - - const to = arguments[0].toSlice(globalObject, bun.default_allocator); - defer 
to.deinit(); - var stream = std.io.fixedBufferStream(&public_path_temp_str); - var writer = stream.writer(); - getPublicPath(to.slice(), VirtualMachine.get().origin, @TypeOf(&writer), &writer); - - return ZigString.init(stream.buffer[0..stream.pos]).toJS(globalObject); -} - extern fn dump_zone_malloc_stats() void; fn dump_mimalloc(globalObject: *JSC.JSGlobalObject, _: *JSC.CallFrame) JSC.JSValue { @@ -4377,7 +4204,7 @@ pub const FFIObject = struct { } } - if (addr > max_addressible_memory) { + if (addr > max_addressable_memory) { return JSC.toInvalidArguments("Pointer is outside max addressible memory, which usually means a bug in your program.", .{}, globalThis); } @@ -4459,7 +4286,7 @@ pub const FFIObject = struct { return .{ .err = JSC.toInvalidArguments("length must be > 0. This usually means a bug in your code.", .{}, globalThis) }; } - if (length_i > max_addressible_memory) { + if (length_i > max_addressable_memory) { return .{ .err = JSC.toInvalidArguments("length exceeds max addressable memory. This usually means a bug in your code.", .{}, globalThis) }; } diff --git a/src/bun.js/api/JSBundler.zig b/src/bun.js/api/JSBundler.zig index 7008c23069f8f..841ba7c48216c 100644 --- a/src/bun.js/api/JSBundler.zig +++ b/src/bun.js/api/JSBundler.zig @@ -100,59 +100,6 @@ pub const JSBundler = struct { globalThis.throwInvalidArguments("Expected plugin to be an object", .{}); return error.JSError; } - if (try plugin.getOwnObject(globalThis, "SECRET_SERVER_COMPONENTS_INTERNALS")) |internals| { - if (internals.getOwn(globalThis, "router")) |router_value| { - if (router_value.as(JSC.API.FileSystemRouter) != null) { - this.server_components.router.set(globalThis, router_value); - } else { - globalThis.throwInvalidArguments("Expected router to be a Bun.FileSystemRouter", .{}); - return error.JSError; - } - } - - const directive_object = (try internals.getOwnObject(globalThis, "directive")) orelse { - globalThis.throwInvalidArguments("Expected directive to be an object", .{}); - return error.JSError; - }; - - if (try directive_object.getArray(globalThis, "client")) |client_names_array| { - var array_iter = client_names_array.arrayIterator(globalThis); - while (array_iter.next()) |client_name| { - var slice = client_name.toSliceOrNull(globalThis) orelse { - globalThis.throwInvalidArguments("Expected directive.client to be an array of strings", .{}); - return error.JSError; - }; - defer slice.deinit(); - try this.server_components.client.append(allocator, try OwnedString.initCopy(allocator, slice.slice())); - } - } else { - globalThis.throwInvalidArguments("Expected directive.client to be an array of strings", .{}); - return error.JSError; - } - - if (try directive_object.getArray(globalThis, "server")) |server_names_array| { - var array_iter = server_names_array.arrayIterator(globalThis); - while (array_iter.next()) |server_name| { - var slice = server_name.toSliceOrNull(globalThis) orelse { - globalThis.throwInvalidArguments("Expected directive.server to be an array of strings", .{}); - return error.JSError; - }; - defer slice.deinit(); - try this.server_components.server.append(allocator, try OwnedString.initCopy(allocator, slice.slice())); - } - } else { - globalThis.throwInvalidArguments("Expected directive.server to be an array of strings", .{}); - return error.JSError; - } - - continue; - } - - // var decl = PluginDeclaration{ - // .name = OwnedString.initEmpty(allocator), - // .setup = .{}, - // }; - // defer decl.deinit(); if (plugin.getOwnOptional(globalThis, "name", ZigString.Slice) catch 
null) |slice| { defer slice.deinit(); diff --git a/src/bun.js/api/JSTranspiler.zig b/src/bun.js/api/JSTranspiler.zig index 2182ed6ccc950..4acb5bbb94a53 100644 --- a/src/bun.js/api/JSTranspiler.zig +++ b/src/bun.js/api/JSTranspiler.zig @@ -827,8 +827,9 @@ pub fn constructor( bundler.options.auto_import_jsx = transpiler_options.runtime.auto_import_jsx; bundler.options.inlining = transpiler_options.runtime.inlining; bundler.options.hot_module_reloading = transpiler_options.runtime.hot_module_reloading; - bundler.options.jsx.supports_fast_refresh = bundler.options.hot_module_reloading and - bundler.options.allow_runtime and transpiler_options.runtime.react_fast_refresh; + bundler.options.react_fast_refresh = bundler.options.hot_module_reloading and + bundler.options.allow_runtime and + transpiler_options.runtime.react_fast_refresh; const transpiler = allocator.create(Transpiler) catch unreachable; transpiler.* = Transpiler{ @@ -845,7 +846,7 @@ pub fn finalize( this: *Transpiler, ) callconv(.C) void { this.bundler.log.deinit(); - this.scan_pass_result.named_imports.deinit(); + this.scan_pass_result.named_imports.deinit(this.scan_pass_result.import_records.allocator); this.scan_pass_result.import_records.deinit(); this.scan_pass_result.used_symbols.deinit(); if (this.buffer_writer != null) { @@ -881,19 +882,7 @@ fn getParseResult(this: *Transpiler, allocator: std.mem.Allocator, code: []const // .allocator = this. }; - var parse_result = this.bundler.parse(parse_options, null); - - // necessary because we don't run the linker - if (parse_result) |*res| { - for (res.ast.import_records.slice()) |*import| { - if (import.kind.isCommonJS()) { - import.do_commonjs_transform_in_printer = true; - import.module_id = @as(u32, @truncate(bun.hash(import.path.pretty))); - } - } - } - - return parse_result; + return this.bundler.parse(parse_options, null); } pub fn scan( diff --git a/src/bun.js/bindings/bindings.cpp b/src/bun.js/bindings/bindings.cpp index 2c69eb1a9fdc0..675e58e9c8d75 100644 --- a/src/bun.js/bindings/bindings.cpp +++ b/src/bun.js/bindings/bindings.cpp @@ -4394,9 +4394,7 @@ static void fromErrorInstance(ZigException* except, JSC::JSGlobalObject* global, if (except->code == SYNTAX_ERROR_CODE) { except->message = Bun::toStringRef(err->sanitizedMessageString(global)); } else if (JSC::JSValue message = obj->getIfPropertyExists(global, vm.propertyNames->message)) { - except->message = Bun::toStringRef(global, message); - } else { except->message = Bun::toStringRef(err->sanitizedMessageString(global)); } @@ -4787,7 +4785,7 @@ void JSC__JSValue__toZigException(JSC__JSValue jsException, JSC__JSGlobalObject* if (JSC::Exception* jscException = JSC::jsDynamicCast<JSC::Exception*>(value)) { if (JSC::ErrorInstance* error = JSC::jsDynamicCast<JSC::ErrorInstance*>(jscException->value())) { - fromErrorInstance(exception, global, error, &jscException->stack(), value); + fromErrorInstance(exception, global, error, &jscException->stack(), jscException->value()); return; } } diff --git a/src/bun.js/bindings/bindings.zig b/src/bun.js/bindings/bindings.zig index 37b7f83500ad0..27d04a7f71afa 100644 --- a/src/bun.js/bindings/bindings.zig +++ b/src/bun.js/bindings/bindings.zig @@ -1764,6 +1764,10 @@ pub const JSString = extern struct { pub const name = "JSC::JSString"; pub const namespace = "JSC"; + pub fn toJS(str: *JSString) JSValue { + return JSValue.fromCell(str); + } + pub fn toObject(this: *JSString, global: *JSGlobalObject) ?*JSObject { return shim.cppFn("toObject", .{ this, global }); } @@ -6025,7 +6029,7 @@ pub const JSValue = enum(JSValueReprInt) 
{ /// For native C++ classes extending JSCell, this retrieves s_info's name pub fn getClassInfoName(this: JSValue) ?bun.String { - if (!this.isObject()) return null; + if (!this.isCell()) return null; var out: bun.String = bun.String.empty; if (!JSC__JSValue__getClassInfoName(this, &out)) return null; return out; diff --git a/src/bun.js/event_loop.zig b/src/bun.js/event_loop.zig index f8ca831d13988..e0e976180856e 100644 --- a/src/bun.js/event_loop.zig +++ b/src/bun.js/event_loop.zig @@ -480,10 +480,7 @@ pub const Task = TaggedPointerUnion(.{ ShellAsyncSubprocessDone, TimerObject, bun.shell.Interpreter.Builtin.Yes.YesTask, - - bun.kit.DevServer.BundleTask, - bun.kit.DevServer.HotReloadTask, - + // bun.kit.DevServer.HotReloadTask, ProcessWaiterThreadTask, RuntimeTranspilerStore, ServerAllConnectionsClosedTask, @@ -1026,13 +1023,13 @@ pub const EventLoop = struct { // special case: we return return 0; }, - @field(Task.Tag, @typeName(bun.kit.DevServer.HotReloadTask)) => { - const transform_task = task.get(bun.kit.DevServer.HotReloadTask).?; - transform_task.*.run(); - transform_task.deinit(); - // special case: we return - return 0; - }, + // @field(Task.Tag, @typeName(bun.kit.DevServer.HotReloadTask)) => { + // const transform_task = task.get(bun.kit.DevServer.HotReloadTask).?; + // transform_task.*.run(); + // transform_task.deinit(); + // // special case: we return + // return 0; + // }, @field(Task.Tag, typeBaseName(@typeName(FSWatchTask))) => { var transform_task: *FSWatchTask = task.get(FSWatchTask).?; transform_task.*.run(); @@ -1245,15 +1242,9 @@ pub const EventLoop = struct { var any: *ServerAllConnectionsClosedTask = task.get(ServerAllConnectionsClosedTask).?; any.runFromJSThread(virtual_machine); }, - @field(Task.Tag, typeBaseName(@typeName(bun.kit.DevServer.BundleTask))) => { - task.get(bun.kit.DevServer.BundleTask).?.completeOnMainThread(); - }, - else => if (Environment.allow_assert) { - bun.Output.prettyln("\nUnexpected tag: {s}\n", .{@tagName(task.tag())}); - } else { - log("\nUnexpected tag: {s}\n", .{@tagName(task.tag())}); - unreachable; + else => { + bun.Output.panic("Unexpected tag: {s}", .{@tagName(task.tag())}); }, } @@ -1702,8 +1693,7 @@ pub const MiniVM = struct { } pub inline fn incrementPendingUnrefCounter(this: @This()) void { - _ = this; // autofix - + _ = this; @panic("FIXME TODO"); } diff --git a/src/bun.js/javascript.zig b/src/bun.js/javascript.zig index 18f8f73e0b60d..71794e686e015 100644 --- a/src/bun.js/javascript.zig +++ b/src/bun.js/javascript.zig @@ -1178,7 +1178,7 @@ pub const VirtualMachine = struct { } } - pub fn reload(this: *VirtualMachine) void { + pub fn reload(this: *VirtualMachine, _: *HotReloader.HotReloadTask) void { Output.debug("Reloading...", .{}); const should_clear_terminal = !this.bundler.env.hasSetNoClearTerminalOnReload(!Output.enable_ansi_colors); if (this.hot_reload == .watch) { @@ -1620,7 +1620,6 @@ pub const VirtualMachine = struct { // Avoid reading from tsconfig.json & package.json when we're in standalone mode vm.bundler.configureLinkerWithAutoJSX(false); - try vm.bundler.configureFramework(false); vm.bundler.macro_context = js_ast.Macro.MacroContext.init(&vm.bundler); @@ -1725,14 +1724,9 @@ pub const VirtualMachine = struct { }; vm.bundler.configureLinker(); - try vm.bundler.configureFramework(false); vm.bundler.macro_context = js_ast.Macro.MacroContext.init(&vm.bundler); - if (opts.args.serve orelse false) { - vm.bundler.linker.onImportCSS = Bun.onImportCSS; - } - vm.global = ZigGlobalObject.create( vm.console, -1, @@ -1873,14 
+1867,9 @@ pub const VirtualMachine = struct { vm.bundler.configureLinkerWithAutoJSX(false); } - try vm.bundler.configureFramework(false); vm.smol = opts.smol; vm.bundler.macro_context = js_ast.Macro.MacroContext.init(&vm.bundler); - if (opts.args.serve orelse false) { - vm.bundler.linker.onImportCSS = Bun.onImportCSS; - } - vm.global = ZigGlobalObject.create( vm.console, @as(i32, @intCast(worker.execution_context_id)), @@ -1964,14 +1953,9 @@ pub const VirtualMachine = struct { }; vm.bundler.configureLinker(); - try vm.bundler.configureFramework(false); vm.bundler.macro_context = js_ast.Macro.MacroContext.init(&vm.bundler); - if (opts.args.serve orelse false) { - vm.bundler.linker.onImportCSS = Bun.onImportCSS; - } - vm.regular_event_loop.virtual_machine = vm; vm.smol = opts.smol; @@ -2423,18 +2407,18 @@ pub const VirtualMachine = struct { } if (JSC.HardcodedModule.Aliases.getWithEql(specifier, bun.String.eqlComptime, jsc_vm.bundler.options.target)) |hardcoded| { - if (hardcoded.tag == .none) { - resolveMaybeNeedsTrailingSlash( - res, - global, - bun.String.init(hardcoded.path), - source, - query_string, - is_esm, - is_a_file_path, - ); - return; - } + // if (hardcoded.tag == .none) { + // resolveMaybeNeedsTrailingSlash( + // res, + // global, + // bun.String.init(hardcoded.path), + // source, + // query_string, + // is_esm, + // is_a_file_path, + // ); + // return; + // } res.* = ErrorableString.ok(bun.String.init(hardcoded.path)); return; @@ -2674,8 +2658,7 @@ pub const VirtualMachine = struct { )) { .success => |r| r, .failure => |e| { - { - } + {} this.log.addErrorFmt( null, logger.Loc.Empty, @@ -4027,9 +4010,18 @@ pub const VirtualMachine = struct { return instance; } + /// To satisfy the interface from NewHotReloader() + pub fn getLoaders(vm: *VirtualMachine) *bun.options.Loader.HashTable { + return &vm.bundler.options.loaders; + } + + /// To satisfy the interface from NewHotReloader() + pub fn bustDirCache(vm: *VirtualMachine, path: []const u8) bool { + return vm.bundler.resolver.bustDirCache(path); + } + comptime { - if (!JSC.is_bindgen) - _ = Bun__remapStackFramePositions; + _ = Bun__remapStackFramePositions; } }; @@ -4120,7 +4112,7 @@ pub fn NewHotReloader(comptime Ctx: type, comptime EventLoopType: type, comptime // get another hot reload request while we're reloading, we'll // still enqueue it. 
while (this.reloader.pending_count.swap(0, .monotonic) > 0) { - this.reloader.ctx.reload(); + this.reloader.ctx.reload(this); } } diff --git a/src/bun.js/module_loader.zig b/src/bun.js/module_loader.zig index d6a5e0daeaa80..e2e79283cbfba 100644 --- a/src/bun.js/module_loader.zig +++ b/src/bun.js/module_loader.zig @@ -667,10 +667,10 @@ pub const RuntimeTranspilerStore = struct { // In a benchmarking loading @babel/standalone 100 times: // // After ensureHash: - // 354.00 ms 4.2% 354.00 ms WTF::StringImpl::hashSlowCase() const + // 354.00 ms 4.2% 354.00 ms WTF::StringImpl::hashSlowCase() const // // Before ensureHash: - // 506.00 ms 6.1% 506.00 ms WTF::StringImpl::hashSlowCase() const + // 506.00 ms 6.1% 506.00 ms WTF::StringImpl::hashSlowCase() const // result.ensureHash(); @@ -2173,12 +2173,6 @@ pub const ModuleLoader = struct { } } - if (jsc_vm.bundler.options.routes.asset_prefix_path.len > 0) { - if (strings.hasPrefix(slice, jsc_vm.bundler.options.routes.asset_prefix_path)) { - slice = slice[jsc_vm.bundler.options.routes.asset_prefix_path.len..]; - } - } - string_to_use_for_source.* = slice; if (strings.indexOfChar(slice, '?')) |i| { @@ -2821,7 +2815,7 @@ pub const HardcodedModule = enum { pub const Alias = struct { path: string, - tag: ImportRecord.Tag = ImportRecord.Tag.hardcoded, + tag: ImportRecord.Tag = .builtin, }; pub const Aliases = struct { diff --git a/src/bun.js/node/path_watcher.zig b/src/bun.js/node/path_watcher.zig index 3203d7365d008..dbbc6b6798700 100644 --- a/src/bun.js/node/path_watcher.zig +++ b/src/bun.js/node/path_watcher.zig @@ -27,8 +27,8 @@ const StringOrBytesToDecode = FSWatcher.FSWatchTaskWindows.StringOrBytesToDecode pub const PathWatcherManager = struct { const options = @import("../../options.zig"); - pub const Watcher = GenericWatcher.NewWatcher(*PathWatcherManager); const log = Output.scoped(.PathWatcherManager, false); + pub const Watcher = GenericWatcher.NewWatcher(*PathWatcherManager); main_watcher: *Watcher, watchers: bun.BabyList(?*PathWatcher) = .{}, @@ -148,6 +148,7 @@ pub const PathWatcherManager = struct { .current_fd_task = bun.FDHashMap(*DirectoryRegisterTask).init(bun.default_allocator), .watchers = watchers, .main_watcher = try Watcher.init( + // PathWatcherManager, this, vm.bundler.fs, bun.default_allocator, diff --git a/src/bun.js/web_worker.zig b/src/bun.js/web_worker.zig index 127c0751bc073..7f64f84842acf 100644 --- a/src/bun.js/web_worker.zig +++ b/src/bun.js/web_worker.zig @@ -249,11 +249,6 @@ pub const WebWorker = struct { var b = &vm.bundler; - b.configureRouter(false) catch { - this.flushLogs(); - this.exitAndDeinit(); - return; - }; b.configureDefines() catch { this.flushLogs(); this.exitAndDeinit(); diff --git a/src/bun.js/webcore/encoding.zig b/src/bun.js/webcore/encoding.zig index 6ac268370913c..cd8389eefd4d1 100644 --- a/src/bun.js/webcore/encoding.zig +++ b/src/bun.js/webcore/encoding.zig @@ -464,9 +464,9 @@ pub const TextEncoderStreamEncoder = struct { }; // In a previous benchmark, counting the length took about as much time as allocating the buffer. 
// - // Benchmark Time % CPU (ns) Iterations Ratio - // 288.00 ms 13.5% 288.00 ms simdutf::arm64::implementation::convert_latin1_to_utf8(char const*, unsigned long, char*) const - // 278.00 ms 13.0% 278.00 ms simdutf::arm64::implementation::utf8_length_from_latin1(char const*, unsigned long) const + // Benchmark Time % CPU (ns) Iterations Ratio + // 288.00 ms 13.5% 288.00 ms simdutf::arm64::implementation::convert_latin1_to_utf8(char const*, unsigned long, char*) const + // 278.00 ms 13.0% 278.00 ms simdutf::arm64::implementation::utf8_length_from_latin1(char const*, unsigned long) const // // var buffer = std.ArrayList(u8).initCapacity(bun.default_allocator, input.len + prepend_replacement_len) catch { diff --git a/src/bun.zig b/src/bun.zig index ca1f8415f46f1..db457eba7b050 100644 --- a/src/bun.zig +++ b/src/bun.zig @@ -34,8 +34,6 @@ pub const auto_allocator: std.mem.Allocator = if (!use_mimalloc) else @import("./memory_allocator.zig").auto_allocator; -pub const huge_allocator_threshold: comptime_int = @import("./memory_allocator.zig").huge_threshold; - pub const callmod_inline: std.builtin.CallModifier = if (builtin.mode == .Debug) .auto else .always_inline; pub const callconv_inline: std.builtin.CallingConvention = if (builtin.mode == .Debug) .Unspecified else .Inline; @@ -1000,13 +998,14 @@ pub const StringArrayHashMapContext = struct { pub const Prehashed = struct { value: u32, input: []const u8, + pub fn hash(this: @This(), s: []const u8) u32 { if (s.ptr == this.input.ptr and s.len == this.input.len) return this.value; return @as(u32, @truncate(std.hash.Wyhash.hash(0, s))); } - pub fn eql(_: @This(), a: []const u8, b: []const u8) bool { + pub fn eql(_: @This(), a: []const u8, b: []const u8, _: usize) bool { return strings.eqlLong(a, b, true); } }; @@ -2991,6 +2990,12 @@ pub noinline fn outOfMemory() noreturn { crash_handler.crashHandler(.out_of_memory, null, @returnAddress()); } +pub fn todoPanic(src: std.builtin.SourceLocation, comptime format: string, args: anytype) noreturn { + @setCold(true); + bun.Analytics.Features.todo_panic = 1; + Output.panic("TODO: " ++ format ++ " ({s}:{d})", args ++ .{ src.file, src.line }); +} + /// Wrapper around allocator.create(T) that safely initializes the pointer. Prefer this over /// `std.mem.Allocator.create`, but prefer using `bun.new` over `create(default_allocator, T, t)` pub fn create(allocator: std.mem.Allocator, comptime T: type, t: T) *T { diff --git a/src/bun_js.zig b/src/bun_js.zig index 7144c3ae49b7d..828a9b0b1de87 100644 --- a/src/bun_js.zig +++ b/src/bun_js.zig @@ -107,9 +107,6 @@ pub const Run = struct { b.options.env.behavior = .load_all_without_inlining; - b.configureRouter(false) catch { - failWithBuildError(vm); - }; b.configureDefines() catch { failWithBuildError(vm); }; @@ -252,9 +249,6 @@ pub const Run = struct { .unspecified => {}, } - b.configureRouter(false) catch { - failWithBuildError(vm); - }; b.configureDefines() catch { failWithBuildError(vm); }; diff --git a/src/bundler.zig b/src/bundler.zig index 540f879bd79d5..56406a9be0584 100644 --- a/src/bundler.zig +++ b/src/bundler.zig @@ -322,6 +322,12 @@ pub const PluginRunner = struct { } }; +/// This structure was the JavaScript bundler before bundle_v2 was written. 
It now +/// acts mostly as a configuration object, but it also contains stateful logic around +/// logging errors (.log) and module resolution (.resolve_queue) +/// +/// This object is not exclusive to bundle_v2/Bun.build, one of these is stored +/// on every VM so that the options can be used for transpilation. pub const Bundler = struct { options: options.BundleOptions, log: *logger.Log, @@ -581,15 +587,7 @@ pub const Bundler = struct { defer js_ast.Expr.Data.Store.reset(); defer js_ast.Stmt.Data.Store.reset(); - if (this.options.framework) |framework| { - if (this.options.target.isClient()) { - try this.options.loadDefines(this.allocator, this.env, &framework.client.env); - } else { - try this.options.loadDefines(this.allocator, this.env, &framework.server.env); - } - } else { - try this.options.loadDefines(this.allocator, this.env, &this.options.env); - } + try this.options.loadDefines(this.allocator, this.env, &this.options.env); if (this.options.define.dots.get("NODE_ENV")) |NODE_ENV| { if (NODE_ENV.len > 0 and NODE_ENV[0].data.value == .e_string and NODE_ENV[0].data.value.e_string.eqlComptime("production")) { @@ -598,97 +596,6 @@ pub const Bundler = struct { } } - pub fn configureFramework( - this: *Bundler, - comptime load_defines: bool, - ) !void { - if (this.options.framework) |*framework| { - if (framework.needsResolveFromPackage()) { - var route_config = this.options.routes; - var pair = PackageJSON.FrameworkRouterPair{ .framework = framework, .router = &route_config }; - - if (framework.development) { - try this.resolver.resolveFramework(framework.package, &pair, .development, load_defines); - } else { - try this.resolver.resolveFramework(framework.package, &pair, .production, load_defines); - } - - if (this.options.areDefinesUnset()) { - if (this.options.target.isClient()) { - this.options.env = framework.client.env; - } else { - this.options.env = framework.server.env; - } - } - - if (pair.loaded_routes) { - this.options.routes = route_config; - } - framework.resolved = true; - this.options.framework = framework.*; - } else if (!framework.resolved) { - Output.panic("directly passing framework path is not implemented yet!", .{}); - } - } - } - - pub fn configureFrameworkWithResolveResult(this: *Bundler, comptime client: bool) !?_resolver.Result { - if (this.options.framework != null) { - try this.configureFramework(true); - if (comptime client) { - if (this.options.framework.?.client.isEnabled()) { - return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.client.path, .stmt); - } - - if (this.options.framework.?.fallback.isEnabled()) { - return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.fallback.path, .stmt); - } - } else { - if (this.options.framework.?.server.isEnabled()) { - return try this.resolver.resolve(this.fs.top_level_dir, this.options.framework.?.server, .stmt); - } - } - } - - return null; - } - - pub fn configureRouter(this: *Bundler, comptime load_defines: bool) !void { - try this.configureFramework(load_defines); - defer { - if (load_defines) { - this.configureDefines() catch {}; - } - } - - if (this.options.routes.routes_enabled) { - const dir_info_ = try this.resolver.readDirInfo(this.options.routes.dir); - const dir_info = dir_info_ orelse return error.MissingRoutesDir; - - this.options.routes.dir = dir_info.abs_path; - - this.router = try Router.init(this.fs, this.allocator, this.options.routes); - try this.router.?.loadRoutes( - this.log, - dir_info, - Resolver, - &this.resolver, - 
this.fs.top_level_dir, - ); - this.router.?.routes.client_framework_enabled = this.options.isFrontendFrameworkEnabled(); - return; - } - - // If we get this far, it means they're trying to run the bundler without a preconfigured router - if (this.options.entry_points.len > 0) { - this.options.routes.routes_enabled = false; - } - - if (this.router) |*router| { - router.routes.client_framework_enabled = this.options.isFrontendFrameworkEnabled(); - } - } - pub fn resetStore(_: *const Bundler) void { js_ast.Expr.Data.Store.reset(); js_ast.Stmt.Data.Store.reset(); @@ -709,6 +616,7 @@ pub const Bundler = struct { input_fd: ?StoredFileDescriptorType, empty: bool = false, }; + pub fn buildWithResolveResult( bundler: *Bundler, resolve_result: _resolver.Result, @@ -985,7 +893,7 @@ pub const Bundler = struct { &writer, .esm, ), - .bun, .bun_macro => try bundler.print( + .bun, .bun_macro, .kit_server_components_ssr => try bundler.print( result, *js_printer.BufferPrinter, &writer, @@ -1157,8 +1065,7 @@ pub const Bundler = struct { js_ast.Symbol.Map.initList(symbols), source, false, - js_printer.Options{ - .externals = ast.externals, + .{ .runtime_imports = ast.runtime_imports, .require_ref = ast.require_ref, .css_import_behavior = bundler.options.cssImportBehavior(), @@ -1180,8 +1087,7 @@ pub const Bundler = struct { js_ast.Symbol.Map.initList(symbols), source, false, - js_printer.Options{ - .externals = ast.externals, + .{ .runtime_imports = ast.runtime_imports, .require_ref = ast.require_ref, .source_map_handler = source_map_context, @@ -1204,8 +1110,7 @@ pub const Bundler = struct { js_ast.Symbol.Map.initList(symbols), source, is_bun, - js_printer.Options{ - .externals = ast.externals, + .{ .runtime_imports = ast.runtime_imports, .require_ref = ast.require_ref, .css_import_behavior = bundler.options.cssImportBehavior(), @@ -1444,10 +1349,10 @@ pub const Bundler = struct { opts.features.react_fast_refresh = opts.features.hot_module_reloading and jsx.parse and - bundler.options.jsx.supports_fast_refresh; + bundler.options.react_fast_refresh; opts.filepath_hash_for_hmr = file_hash orelse 0; opts.features.auto_import_jsx = bundler.options.auto_import_jsx; - opts.warn_about_unbundled_modules = target.isNotBun(); + opts.warn_about_unbundled_modules = !target.isBun(); opts.features.inject_jest_globals = this_parse.inject_jest_globals; opts.features.minify_syntax = bundler.options.minify_syntax; diff --git a/src/bundler/bundle_v2.zig b/src/bundler/bundle_v2.zig index 877031490772f..580c97f04803a 100644 --- a/src/bundler/bundle_v2.zig +++ b/src/bundler/bundle_v2.zig @@ -43,7 +43,6 @@ // const Bundler = bun.Bundler; const bun = @import("root").bun; -const from = bun.from; const string = bun.string; const Output = bun.Output; const Global = bun.Global; @@ -127,6 +126,8 @@ const debugTreeShake = Output.scoped(.TreeShake, true); const BitSet = bun.bit_set.DynamicBitSetUnmanaged; const Async = bun.Async; +const kit = bun.kit; + const logPartDependencyTree = Output.scoped(.part_dep_tree, false); fn tracer(comptime src: std.builtin.SourceLocation, comptime name: [:0]const u8) bun.tracy.Ctx { @@ -326,131 +327,189 @@ const Watcher = bun.JSC.NewHotReloader(BundleV2, EventLoop, true); pub const BundleV2 = struct { bundler: *Bundler, + /// When Server Component is enabled, this is used for the client bundles + /// and `bundler` is used for the server bundles. 
client_bundler: *Bundler, - server_bundler: *Bundler, - graph: Graph = Graph{}, - linker: LinkerContext = LinkerContext{ .loop = undefined }, - bun_watcher: ?*Watcher.Watcher = null, - // kit_watcher: ?*bun.kit.DevServer.HotReloader.Watcher = null, - plugins: ?*JSC.API.JSBundler.Plugin = null, - completion: ?CompletionPtr = null, - source_code_length: usize = 0, - - // There is a race condition where an onResolve plugin may schedule a task on the bundle thread before it's parsing task completes + /// See kit.Framework.ServerComponents.separate_ssr_graph + ssr_bundler: *Bundler, + /// When Bun Kit is used, the resolved framework is passed here + framework: ?kit.Framework, + graph: Graph, + linker: LinkerContext, + bun_watcher: ?*Watcher.Watcher, + plugins: ?*JSC.API.JSBundler.Plugin, + completion: ?*JSBundleCompletionTask, + source_code_length: usize, + + /// There is a race condition where an onResolve plugin may schedule a task on the bundle thread before it's parsing task completes resolve_tasks_waiting_for_import_source_index: std.AutoArrayHashMapUnmanaged(Index.Int, BabyList(struct { to_source_index: Index, import_record_index: u32 })) = .{}, /// Allocations not tracked by a threadlocal heap - free_list: std.ArrayList(string) = std.ArrayList(string).init(bun.default_allocator), + free_list: std.ArrayList([]const u8) = std.ArrayList([]const u8).init(bun.default_allocator), unique_key: u64 = 0, dynamic_import_entry_points: std.AutoArrayHashMap(Index.Int, void) = undefined, - pub const CompletionPtr = union(enum) { - js: *JSBundleCompletionTask, - kit: *bun.kit.DevServer.BundleTask, - - pub fn log(ptr: CompletionPtr) *bun.logger.Log { - return switch (ptr) { - inline else => |inner| &inner.log, - }; - } + const KitOptions = struct { + framework: kit.Framework, + client_bundler: *Bundler, + ssr_bundler: *Bundler, }; + const ResolvedFramework = struct {}; + const debug = Output.scoped(.Bundle, false); pub inline fn loop(this: *BundleV2) *EventLoop { return &this.linker.loop; } - pub fn findReachableFiles(this: *BundleV2) ![]Index { - const trace = tracer(@src(), "findReachableFiles"); - defer trace.end(); + /// Most of the time, accessing .bundler directly is OK. This is only + /// needed when it is important to distinct between client and server + /// + /// Note that .log, .allocator, and other things are shared + /// between the three bundler configurations + pub inline fn bundlerForTarget(this: *BundleV2, target: options.Target) *Bundler { + return if (!this.bundler.options.server_components) + this.bundler + else switch (target) { + else => this.bundler, + .browser => this.client_bundler, + .kit_server_components_ssr => this.ssr_bundler, + }; + } - const Visitor = struct { - reachable: std.ArrayList(Index), - visited: bun.bit_set.DynamicBitSet = undefined, - all_import_records: []ImportRecord.List, - redirects: []u32, - redirect_map: PathToSourceIndexMap, - dynamic_import_entry_points: *std.AutoArrayHashMap(Index.Int, void), - - const MAX_REDIRECTS: usize = 64; - - // Find all files reachable from all entry points. This order should be - // deterministic given that the entry point order is deterministic, since the - // returned order is the postorder of the graph traversal and import record - // order within a given file is deterministic. 
- pub fn visit(v: *@This(), source_index: Index, was_dynamic_import: bool, comptime check_dynamic_imports: bool) void { - if (source_index.isInvalid()) return; - - if (v.visited.isSet(source_index.get())) { - if (comptime check_dynamic_imports) { - if (was_dynamic_import) { - v.dynamic_import_entry_points.put(source_index.get(), {}) catch unreachable; - } + /// Same semantics as bundlerForTarget for `path_to_source_index_map` + pub inline fn pathToSourceIndexMap(this: *BundleV2, target: options.Target) *PathToSourceIndexMap { + return if (!this.bundler.options.server_components) + &this.graph.path_to_source_index_map + else switch (target) { + else => &this.graph.path_to_source_index_map, + .browser => &this.graph.client_path_to_source_index_map, + .kit_server_components_ssr => &this.graph.ssr_path_to_source_index_map, + }; + } + + const ReachableFileVisitor = struct { + reachable: std.ArrayList(Index), + visited: bun.bit_set.DynamicBitSet, + all_import_records: []ImportRecord.List, + redirects: []u32, + redirect_map: PathToSourceIndexMap, + dynamic_import_entry_points: *std.AutoArrayHashMap(Index.Int, void), + /// Files which are Server Component Boundaries + scb_bitset: ?bun.bit_set.DynamicBitSetUnmanaged, + scb_list: ServerComponentBoundary.List.Slice, + + const MAX_REDIRECTS: usize = 64; + + // Find all files reachable from all entry points. This order should be + // deterministic given that the entry point order is deterministic, since the + // returned order is the postorder of the graph traversal and import record + // order within a given file is deterministic. + pub fn visit(v: *@This(), source_index: Index, was_dynamic_import: bool, comptime check_dynamic_imports: bool) void { + if (source_index.isInvalid()) return; + + if (v.visited.isSet(source_index.get())) { + if (comptime check_dynamic_imports) { + if (was_dynamic_import) { + v.dynamic_import_entry_points.put(source_index.get(), {}) catch unreachable; } - return; } - v.visited.set(source_index.get()); - - const import_record_list_id = source_index; - // when there are no import records, v index will be invalid - if (import_record_list_id.get() < v.all_import_records.len) { - const import_records = v.all_import_records[import_record_list_id.get()].slice(); - for (import_records) |*import_record| { - var other_source = import_record.source_index; - if (other_source.isValid()) { - var redirect_count: usize = 0; - while (getRedirectId(v.redirects[other_source.get()])) |redirect_id| : (redirect_count += 1) { - var other_import_records = v.all_import_records[other_source.get()].slice(); - const other_import_record = &other_import_records[redirect_id]; - import_record.source_index = other_import_record.source_index; - import_record.path = other_import_record.path; - other_source = other_import_record.source_index; - if (redirect_count == MAX_REDIRECTS) { - import_record.path.is_disabled = true; - import_record.source_index = Index.invalid; - break; - } - - // Handle redirects to a builtin or external module - // https://github.com/oven-sh/bun/issues/3764 - if (!other_source.isValid()) { - break; - } + return; + } + v.visited.set(source_index.get()); + + if (v.scb_bitset) |scb_bitset| { + if (scb_bitset.isSet(source_index.get())) { + const scb_index = v.scb_list.getIndex(source_index.get()) orelse unreachable; + v.visit(Index.init(v.scb_list.list.items(.reference_source_index)[scb_index]), false, check_dynamic_imports); + v.visit(Index.init(v.scb_list.list.items(.ssr_source_index)[scb_index]), false, check_dynamic_imports); + } + } 
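// Visiting both generated files here is what keeps them alive: nothing
// imports the reference proxy or the SSR copy directly yet, so without
// this step they would be dropped as unreachable.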
+ + const import_record_list_id = source_index; + // when there are no import records, v index will be invalid + if (import_record_list_id.get() < v.all_import_records.len) { + const import_records = v.all_import_records[import_record_list_id.get()].slice(); + for (import_records) |*import_record| { + var other_source = import_record.source_index; + if (other_source.isValid()) { + var redirect_count: usize = 0; + while (getRedirectId(v.redirects[other_source.get()])) |redirect_id| : (redirect_count += 1) { + var other_import_records = v.all_import_records[other_source.get()].slice(); + const other_import_record = &other_import_records[redirect_id]; + import_record.source_index = other_import_record.source_index; + import_record.path = other_import_record.path; + other_source = other_import_record.source_index; + if (redirect_count == MAX_REDIRECTS) { + import_record.path.is_disabled = true; + import_record.source_index = Index.invalid; + break; } - v.visit(import_record.source_index, check_dynamic_imports and import_record.kind == .dynamic, check_dynamic_imports); + // Handle redirects to a builtin or external module + // https://github.com/oven-sh/bun/issues/3764 + if (!other_source.isValid()) { + break; + } } - } - // Redirects replace the source file with another file - if (getRedirectId(v.redirects[source_index.get()])) |redirect_id| { - const redirect_source_index = v.all_import_records[source_index.get()].slice()[redirect_id].source_index.get(); - v.visit(Index.source(redirect_source_index), was_dynamic_import, check_dynamic_imports); - return; + v.visit(import_record.source_index, check_dynamic_imports and import_record.kind == .dynamic, check_dynamic_imports); } } - // Each file must come after its dependencies - v.reachable.append(source_index) catch unreachable; - if (comptime check_dynamic_imports) { - if (was_dynamic_import) { - v.dynamic_import_entry_points.put(source_index.get(), {}) catch unreachable; - } + // Redirects replace the source file with another file + if (getRedirectId(v.redirects[source_index.get()])) |redirect_id| { + const redirect_source_index = v.all_import_records[source_index.get()].slice()[redirect_id].source_index.get(); + v.visit(Index.source(redirect_source_index), was_dynamic_import, check_dynamic_imports); + return; } } - }; + + // Each file must come after its dependencies + v.reachable.append(source_index) catch unreachable; + if (comptime check_dynamic_imports) { + if (was_dynamic_import) { + v.dynamic_import_entry_points.put(source_index.get(), {}) catch unreachable; + } + } + } + }; + + pub fn findReachableFiles(this: *BundleV2) ![]Index { + const trace = tracer(@src(), "findReachableFiles"); + defer trace.end(); + + // Create a quick index for server-component boundaries. + // We need to mark the generated files as reachable, or else many files will appear missing. + var sfa = std.heap.stackFallback(4096, this.graph.allocator); + const stack_alloc = sfa.get(); + var scb_bitset = if (this.graph.server_component_boundaries.list.len > 0) brk: { + var scb_bitset = try bun.bit_set.DynamicBitSetUnmanaged.initEmpty(stack_alloc, this.graph.input_files.len); + const scbs = this.graph.server_component_boundaries.list.slice(); + for (scbs.items(.source_index)) |source_index| { + scb_bitset.set(source_index); + // insert the other one? 
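// (the generated reference/ssr files themselves are reached through this
// bitset inside ReachableFileVisitor.visit, so marking only the boundary's
// own source index here appears to be sufficient)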
+ } + break :brk scb_bitset; + } else null; + defer if (scb_bitset) |*b| b.deinit(stack_alloc); this.dynamic_import_entry_points = std.AutoArrayHashMap(Index.Int, void).init(this.graph.allocator); - var visitor = Visitor{ + var visitor = ReachableFileVisitor{ .reachable = try std.ArrayList(Index).initCapacity(this.graph.allocator, this.graph.entry_points.items.len + 1), .visited = try bun.bit_set.DynamicBitSet.initEmpty(this.graph.allocator, this.graph.input_files.len), .redirects = this.graph.ast.items(.redirect_import_record_index), .all_import_records = this.graph.ast.items(.import_records), .redirect_map = this.graph.path_to_source_index_map, .dynamic_import_entry_points = &this.dynamic_import_entry_points, + .scb_bitset = scb_bitset, + .scb_list = if (scb_bitset != null) + this.graph.server_component_boundaries.slice() + else + undefined, // will never be read since the above bitset is `null` }; defer visitor.visited.deinit(); @@ -489,14 +548,15 @@ pub const BundleV2 = struct { import_record: bun.JSC.API.JSBundler.Resolve.MiniImportRecord, target: options.Target, ) void { - var resolve_result = this.bundler.resolver.resolve( + const bundler = this.bundlerForTarget(target); + var resolve_result = bundler.resolver.resolve( Fs.PathName.init(import_record.source_file).dirWithTrailingSlash(), import_record.specifier, import_record.kind, ) catch |err| { var handles_import_errors = false; var source: ?*const Logger.Source = null; - const log = this.completion.?.log(); + const log = &this.completion.?.log; if (import_record.importer_source_index) |importer| { var record: *ImportRecord = &this.graph.ast.items(.import_records)[importer].slice()[import_record.import_record_index]; @@ -518,7 +578,7 @@ pub const BundleV2 = struct { if (!handles_import_errors) { if (isPackagePath(import_record.specifier)) { - if (target.isWebLike() and options.ExternalModules.isNodeBuiltin(path_to_use)) { + if (target == .browser and options.ExternalModules.isNodeBuiltin(path_to_use)) { addError( log, source, @@ -582,7 +642,7 @@ pub const BundleV2 = struct { if (path.pretty.ptr == path.text.ptr) { // TODO: outbase - const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); + const rel = bun.path.relativePlatform(bundler.fs.top_level_dir, path.text, .loose, false); path.pretty = this.graph.allocator.dupe(u8, rel) catch bun.outOfMemory(); } path.assertPrettyIsValid(); @@ -597,15 +657,9 @@ pub const BundleV2 = struct { } } - const entry = this.graph.path_to_source_index_map.getOrPut(this.graph.allocator, path.hashKey()) catch bun.outOfMemory(); + const entry = this.pathToSourceIndexMap(target).getOrPut(this.graph.allocator, path.hashKey()) catch bun.outOfMemory(); if (!entry.found_existing) { - path.* = path.dupeAllocFixPretty(this.graph.allocator) catch bun.outOfMemory(); - - // We need to parse this - const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); - entry.value_ptr.* = source_index.get(); - out_source_index = source_index; - this.graph.ast.append(bun.default_allocator, JSAst.empty) catch unreachable; + path.* = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); const loader = brk: { if (import_record.importer_source_index) |importer| { var record: *ImportRecord = &this.graph.ast.items(.import_records)[importer].slice()[import_record.import_record_index]; @@ -614,43 +668,20 @@ pub const BundleV2 = struct { } } - break :brk path.loader(&this.bundler.options.loaders) orelse options.Loader.file; + break :brk 
path.loader(&bundler.options.loaders) orelse options.Loader.file; }; - - this.graph.input_files.append(bun.default_allocator, .{ - .source = .{ + const idx = this.enqueueParseTask( + &resolve_result, + .{ .path = path.*, .key_path = path.*, .contents = "", - .index = source_index, - }, - .loader = loader, - .side_effects = switch (loader) { - .text, .json, .toml, .file => _resolver.SideEffects.no_side_effects__pure_data, - else => _resolver.SideEffects.has_side_effects, }, - }) catch bun.outOfMemory(); - var task = this.graph.allocator.create(ParseTask) catch bun.outOfMemory(); - task.* = ParseTask.init(&resolve_result, source_index, this); - task.loader = loader; - task.jsx = this.bundler.options.jsx; - task.task.node.next = null; - task.tree_shaking = this.linker.options.tree_shaking; - task.known_target = import_record.original_target; - - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); - - // Handle onLoad plugins - if (!this.enqueueOnLoadPluginIfNeeded(task)) { - if (loader.shouldCopyForBundling()) { - var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; - additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; - this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; - this.graph.estimated_file_loader_count += 1; - } - - this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); - } + loader, + import_record.original_target, + ) catch bun.outOfMemory(); + entry.value_ptr.* = idx; + out_source_index = Index.init(idx); } else { out_source_index = Index.init(entry.value_ptr.*); } @@ -669,6 +700,7 @@ pub const BundleV2 = struct { batch: *ThreadPoolLib.Batch, resolve: _resolver.Result, is_entry_point: bool, + target: options.Target, ) !?Index.Int { var result = resolve; var path = result.path() orelse return null; @@ -681,12 +713,7 @@ pub const BundleV2 = struct { const source_index = Index.source(this.graph.input_files.len); const loader = this.bundler.options.loaders.get(path.name.ext) orelse .file; - if (path.pretty.ptr == path.text.ptr) { - // TODO: outbase - const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); - path.pretty = this.graph.allocator.dupe(u8, rel) catch bun.outOfMemory(); - } - path.* = try path.dupeAllocFixPretty(this.graph.allocator); + path.* = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); path.assertPrettyIsValid(); entry.value_ptr.* = source_index.get(); this.graph.ast.append(bun.default_allocator, JSAst.empty) catch unreachable; @@ -707,6 +734,7 @@ pub const BundleV2 = struct { task.task.node.next = null; task.tree_shaking = this.linker.options.tree_shaking; task.is_entry_point = is_entry_point; + task.known_target = target; // Handle onLoad plugins as entry points if (!this.enqueueOnLoadPluginIfNeeded(task)) { @@ -725,6 +753,7 @@ pub const BundleV2 = struct { pub fn init( bundler: *ThisBundler, + kit_options: ?KitOptions, allocator: std.mem.Allocator, event_loop: EventLoop, enable_reloading: bool, @@ -740,20 +769,35 @@ pub const BundleV2 = struct { this.* = .{ .bundler = bundler, .client_bundler = bundler, - .server_bundler = bundler, + .ssr_bundler = bundler, + .framework = null, .graph = .{ .pool = undefined, .heap = heap orelse try ThreadlocalArena.init(), .allocator = undefined, + .kit_referenced_server_data = false, + .kit_referenced_client_data = false, }, .linker = .{ .loop = 
event_loop, .graph = .{ .allocator = undefined, - .bundler_graph = undefined, }, }, + .bun_watcher = null, + .plugins = null, + .completion = null, + .source_code_length = 0, }; + if (kit_options) |ko| { + this.client_bundler = ko.client_bundler; + this.ssr_bundler = ko.ssr_bundler; + this.framework = ko.framework; + this.linker.framework = &this.framework.?; + bun.assert(bundler.options.server_components); + bun.assert(this.client_bundler.options.server_components); + bun.assert(this.ssr_bundler.options.server_components); + } this.linker.graph.allocator = this.graph.heap.allocator(); this.graph.allocator = this.linker.graph.allocator; this.bundler.allocator = this.graph.allocator; @@ -763,8 +807,8 @@ this.bundler.log.clone_line_text = true; // We don't expose an option to disable this. Kit requires tree-shaking - // disabled since every export is always referenced in case a future - // module depends on a previously unused export. + // disabled since every export must always exist in case a future + // module starts depending on it. if (this.bundler.options.output_format == .internal_kit_dev) { this.bundler.options.tree_shaking = false; this.bundler.resolver.opts.tree_shaking = false; } else { this.bundler.options.tree_shaking = true; this.bundler.resolver.opts.tree_shaking = true; } - this.linker.graph.bundler_graph = &this.graph; this.linker.resolver = &this.bundler.resolver; this.linker.graph.code_splitting = bundler.options.code_splitting; this.graph.code_splitting = bundler.options.code_splitting; @@ -789,6 +832,7 @@ this.linker.options.public_path = bundler.options.public_path; this.linker.options.target = bundler.options.target; this.linker.options.output_format = bundler.options.output_format; + this.linker.kit_dev_server = bundler.options.kit; var pool = try this.graph.allocator.create(ThreadPool); if (enable_reloading) { @@ -804,17 +848,10 @@ thread_pool, ); - // sanity checks for kit - if (this.bundler.options.output_format == .internal_kit_dev) { - if (this.bundler.options.compile) @panic("TODO: internal_kit_dev does not support compile"); - if (this.bundler.options.code_splitting) @panic("TODO: internal_kit_dev does not support code splitting"); - if (this.bundler.options.transform_only) @panic("TODO: internal_kit_dev does not support transform_only"); - } - return this; } - pub fn enqueueEntryPoints(this: *BundleV2, user_entry_points: []const string) !ThreadPoolLib.Batch { + pub fn enqueueEntryPoints(this: *BundleV2, user_entry_points: []const []const u8, client_entry_points: []const []const u8) !ThreadPoolLib.Batch { var batch = ThreadPoolLib.Batch{}; { @@ -841,22 +878,9 @@ batch.push(ThreadPoolLib.Batch.from(&runtime_parse_task.task)); } - if (this.bundler.router) |router| { - defer this.bundler.resetStore(); - Analytics.Features.filesystem_router += 1; - - const entry_points = try router.getEntryPoints(); - try this.graph.entry_points.ensureUnusedCapacity(this.graph.allocator, entry_points.len); - try this.graph.input_files.ensureUnusedCapacity(this.graph.allocator, entry_points.len); - try this.graph.path_to_source_index_map.ensureUnusedCapacity(this.graph.allocator, @as(u32, @truncate(entry_points.len))); - - for (entry_points) |entry_point| { - const resolved = this.bundler.resolveEntryPoint(entry_point) catch continue; - if (try this.enqueueItem(null, &batch, resolved, true)) |source_index| { - this.graph.entry_points.append(this.graph.allocator,
Index.source(source_index)) catch unreachable; - } else {} - } - } else {} + // Kit has two source indexes which are computed at the end of the + // Scan+Parse phase, but reserved now so that resolution works. + try this.reserveSourceIndexesForKit(); { // Setup entry points @@ -866,7 +890,14 @@ pub const BundleV2 = struct { for (user_entry_points) |entry_point| { const resolved = this.bundler.resolveEntryPoint(entry_point) catch continue; - if (try this.enqueueItem(null, &batch, resolved, true)) |source_index| { + if (try this.enqueueItem(null, &batch, resolved, true, this.bundler.options.target)) |source_index| { + this.graph.entry_points.append(this.graph.allocator, Index.source(source_index)) catch unreachable; + } else {} + } + + for (client_entry_points) |entry_point| { + const resolved = this.bundler.resolveEntryPoint(entry_point) catch continue; + if (try this.enqueueItem(null, &batch, resolved, true, .browser)) |source_index| { this.graph.entry_points.append(this.graph.allocator, Index.source(source_index)) catch unreachable; } else {} } @@ -891,160 +922,256 @@ pub const BundleV2 = struct { } } - pub fn enqueueShadowEntryPoints(this: *BundleV2) !void { - const trace = tracer(@src(), "enqueueShadowEntryPoints"); - defer trace.end(); - const allocator = this.graph.allocator; - - // TODO: make this not slow - { - // process redirects - const initial_reachable = try this.findReachableFiles(); - allocator.free(initial_reachable); - this.dynamic_import_entry_points.deinit(); - } - - const bitset_length = this.graph.input_files.len; - var react_client_component_boundary = bun.bit_set.DynamicBitSet.initEmpty(allocator, bitset_length) catch unreachable; - defer react_client_component_boundary.deinit(); - var any_client = false; + /// This generates the two asts for 'bun:kit/client' and 'bun:kit/server'. Both are generated + /// at the same time in one pass over the SBC list. + pub fn processServerComponentManifestFiles(this: *BundleV2) OOM!void { + // If Kit is not being used, do nothing + const fw = this.framework orelse return; + const sc = fw.server_components orelse return; + + if (this.graph.kit_referenced_client_data) bun.todoPanic(@src(), "implement generation for 'bun:kit/client'", .{}); + if (!this.graph.kit_referenced_server_data) return; + + const alloc = this.graph.allocator; + + var server = try AstBuilder.init(this.graph.allocator, &kit.server_virtual_source, this.bundler.options.hot_module_reloading); + var client = try AstBuilder.init(this.graph.allocator, &kit.client_virtual_source, this.bundler.options.hot_module_reloading); + + var server_manifest_props: std.ArrayListUnmanaged(G.Property) = .{}; + var client_manifest_props: std.ArrayListUnmanaged(G.Property) = .{}; + + const scbs = this.graph.server_component_boundaries.list.slice(); + const sources = this.graph.input_files.items(.source); + const named_exports_array = this.graph.ast.items(.named_exports); + + const id_string = server.newExpr(E.String{ .data = "id" }); + const name_string = server.newExpr(E.String{ .data = "name" }); + const chunks_string = server.newExpr(E.String{ .data = "chunks" }); + const specifier_string = server.newExpr(E.String{ .data = "specifier_string" }); + const empty_array = server.newExpr(E.Array{}); + + for ( + scbs.items(.use_directive), + scbs.items(.source_index), + ) |use, source_id| { + const source = sources[source_id]; + if (use == .client) { + // TODO(@paperdave/kit): this file is being generated far too + // early. 
we don't know which exports are dead and which exports + // are live. Tree-shaking figures that out. However, + // tree-shaking happens after import binding, which would + // require this ast. + // + // The plan: change this to generate a stub ast which only has + // `export const serverManifest = undefined;`, and then + // re-generate this file later with the properly decided + // manifest. However, I will probably reconsider how this + // manifest is being generated when I write the whole + // "production build" part of Kit. + + const keys = named_exports_array[source_id].keys(); + const client_manifest_items = try alloc.alloc(G.Property, keys.len); + + const client_path = server.newExpr(E.String{ .data = source.path.pretty }); + const ssr_path = if (sc.separate_ssr_graph) + server.newExpr(E.String{ .data = try std.fmt.allocPrint(alloc, "ssr:{s}", .{source.path.pretty}) }) + else + client_path; + + for (keys, client_manifest_items) |export_name_string, *client_item| { + const server_key_string = try std.fmt.allocPrint(alloc, "{s}#{s}", .{ source.path.pretty, export_name_string }); + const export_name = server.newExpr(E.String{ .data = export_name_string }); + + // write dependencies on the underlying module, not the proxy + try server_manifest_props.append(alloc, .{ + .key = server.newExpr(E.String{ .data = server_key_string }), + .value = server.newExpr(E.Object{ + .properties = try G.Property.List.fromSlice(alloc, &.{ + .{ .key = id_string, .value = client_path }, + .{ .key = name_string, .value = export_name }, + .{ .key = chunks_string, .value = empty_array }, + }), + }), + }); + client_item.* = .{ + .key = export_name, + .value = server.newExpr(E.Object{ + .properties = try G.Property.List.fromSlice(alloc, &.{ + .{ .key = name_string, .value = export_name }, + .{ .key = specifier_string, .value = ssr_path }, + }), + }), + }; + } - // Loop #1: populate the list of files that are react client components - for (this.graph.use_directive_entry_points.items(.use_directive), this.graph.use_directive_entry_points.items(.source_index)) |use, source_id| { - if (use == .@"use client") { - any_client = true; - react_client_component_boundary.set(source_id); + try client_manifest_props.append(alloc, .{ + .key = client_path, + .value = server.newExpr(E.Object{ + .properties = G.Property.List.init(client_manifest_items), + }), + }); + } else { + bun.todoPanic(@src(), "\"use server\"", .{}); } } - this.graph.shadow_entry_point_range.loc.start = -1; - - var visit_queue = std.fifo.LinearFifo(Index.Int, .Dynamic).init(allocator); - visit_queue.ensureUnusedCapacity(64) catch unreachable; - defer visit_queue.deinit(); - const original_file_count = this.graph.entry_points.items.len; - - for (0..original_file_count) |entry_point_id| { - // we are modifying the array while iterating - // so we should be careful - const entry_point_source_index = this.graph.entry_points.items[entry_point_id]; + try server.appendStmt(S.Local{ + .kind = .k_const, + .decls = try G.Decl.List.fromSlice(alloc, &.{.{ + .binding = Binding.alloc(alloc, B.Identifier{ + .ref = try server.newSymbol(.other, "serverManifest"), + }, Logger.Loc.Empty), + .value = server.newExpr(E.Object{ + .properties = G.Property.List.fromList(server_manifest_props), + }), + }}), + .is_export = true, + }); + try server.appendStmt(S.Local{ + .kind = .k_const, + .decls = try G.Decl.List.fromSlice(alloc, &.{.{ + .binding = Binding.alloc(alloc, B.Identifier{ + .ref = try server.newSymbol(.other, "clientManifest"), + }, Logger.Loc.Empty), + .value = 
server.newExpr(E.Object{ + .properties = G.Property.List.fromList(client_manifest_props), + }), + }}), + .is_export = true, + }); - var all_imported_files = try bun.bit_set.DynamicBitSet.initEmpty(allocator, bitset_length); - defer all_imported_files.deinit(); - visit_queue.head = 0; - visit_queue.count = 0; - const input_path = this.graph.input_files.items(.source)[entry_point_source_index.get()].path; + this.graph.ast.set(Index.kit_server_data.get(), try server.toBundledAst()); + this.graph.ast.set(Index.kit_client_data.get(), try client.toBundledAst()); + } - { - const import_records = this.graph.ast.items(.import_records)[entry_point_source_index.get()]; - for (import_records.slice()) |import_record| { - if (!import_record.source_index.isValid()) { - continue; - } + pub fn enqueueParseTask( + this: *BundleV2, + resolve_result: *const _resolver.Result, + source: Logger.Source, + loader: Loader, + known_target: options.Target, + ) OOM!Index.Int { + const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); + this.graph.ast.append(bun.default_allocator, JSAst.empty) catch unreachable; - if (all_imported_files.isSet(import_record.source_index.get())) { - continue; - } + this.graph.input_files.append(bun.default_allocator, .{ + .source = source, + .loader = loader, + .side_effects = switch (loader) { + .text, .json, .toml, .file => _resolver.SideEffects.no_side_effects__pure_data, + else => _resolver.SideEffects.has_side_effects, + }, + }) catch bun.outOfMemory(); + var task = this.graph.allocator.create(ParseTask) catch bun.outOfMemory(); + task.* = ParseTask.init(resolve_result, source_index, this); + task.loader = loader; + task.jsx = this.bundler.options.jsx; + task.task.node.next = null; + task.tree_shaking = this.linker.options.tree_shaking; + task.known_target = known_target; - all_imported_files.set(import_record.source_index.get()); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); - try visit_queue.writeItem(import_record.source_index.get()); - } + // Handle onLoad plugins + if (!this.enqueueOnLoadPluginIfNeeded(task)) { + if (loader.shouldCopyForBundling()) { + var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; + additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; + this.graph.estimated_file_loader_count += 1; } - while (visit_queue.readItem()) |target_source_index| { - const import_records = this.graph.ast.items(.import_records)[target_source_index]; - for (import_records.slice()) |import_record| { - if (!import_record.source_index.isValid()) { - continue; - } + this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); + } - if (all_imported_files.isSet(import_record.source_index.get())) continue; - all_imported_files.set(import_record.source_index.get()); + return source_index.get(); + } - try visit_queue.writeItem(import_record.source_index.get()); - } - } + pub fn enqueueParseTask2( + this: *BundleV2, + source: Logger.Source, + loader: Loader, + known_target: options.Target, + ) OOM!Index.Int { + const source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); + this.graph.ast.append(bun.default_allocator, JSAst.empty) catch unreachable; - all_imported_files.setIntersection(react_client_component_boundary); - if (all_imported_files.findFirstSet() == null) continue; - const 
source_index = Index.init(@as(u32, @intCast(this.graph.ast.len))); + this.graph.input_files.append(bun.default_allocator, .{ + .source = source, + .loader = loader, + .side_effects = switch (loader) { + .text, .json, .toml, .file => _resolver.SideEffects.no_side_effects__pure_data, + else => _resolver.SideEffects.has_side_effects, + }, + }) catch bun.outOfMemory(); + var task = this.graph.allocator.create(ParseTask) catch bun.outOfMemory(); + task.* = .{ + .ctx = this, + .path = source.path, + .contents_or_fd = .{ + .contents = source.contents, + }, + .side_effects = .has_side_effects, + .jsx = this.bundler.options.jsx, + .source_index = source_index, + .module_type = .unknown, + .emit_decorator_metadata = false, // TODO + .package_version = "", + .loader = loader, + .tree_shaking = this.linker.options.tree_shaking, + .known_target = known_target, + }; + task.task.node.next = null; - var shadow = ShadowEntryPoint{ - .from_source_index = entry_point_source_index.get(), - .to_source_index = source_index.get(), - }; - var builder = ShadowEntryPoint.Builder{ - .ctx = this, - .source_code_buffer = MutableString.initEmpty(allocator), - .resolved_source_indices = std.ArrayList(Index.Int).init(allocator), - .shadow = &shadow, - }; + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); - var iter = all_imported_files.iterator(.{}); - while (iter.next()) |index| { - builder.addClientComponent(index); + // Handle onLoad plugins + if (!this.enqueueOnLoadPluginIfNeeded(task)) { + if (loader.shouldCopyForBundling()) { + var additional_files: *BabyList(AdditionalFile) = &this.graph.input_files.items(.additional_files)[source_index.get()]; + additional_files.push(this.graph.allocator, .{ .source_index = task.source_index.get() }) catch unreachable; + this.graph.input_files.items(.side_effects)[source_index.get()] = _resolver.SideEffects.no_side_effects__pure_data; + this.graph.estimated_file_loader_count += 1; } - bun.assert(builder.resolved_source_indices.items.len > 0); - const path = Fs.Path.initWithNamespace( - std.fmt.allocPrint( - allocator, - "{s}/{s}.client.js", - .{ input_path.name.dirOrDot(), input_path.name.base }, - ) catch unreachable, - "client-component", - ); + this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); + } + return source_index.get(); + } - if (this.graph.shadow_entry_point_range.loc.start < 0) { - this.graph.shadow_entry_point_range.loc.start = @as(i32, @intCast(source_index.get())); - } + /// Enqueue a ServerComponentParseTask. + /// `source_without_index` is copied and assigned a new source index. That index is returned. 
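+ /// A usage sketch mirroring the "use client" call site later in this file,
+ /// where `result` is a completed parse result:
+ ///
+ ///     const reference_source_index = try this.enqueueServerComponentGeneratedFile(
+ ///         .{ .client_reference_proxy = .{
+ ///             .other_source = result.source,
+ ///             .named_exports = result.ast.named_exports,
+ ///         } },
+ ///         result.source,
+ ///     );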
+ pub fn enqueueServerComponentGeneratedFile( + this: *BundleV2, + data: ServerComponentParseTask.Data, + source_without_index: Logger.Source, + ) OOM!Index.Int { + var new_source: Logger.Source = source_without_index; + const source_index = this.graph.input_files.len; + new_source.index = Index.init(source_index); + try this.graph.input_files.append(default_allocator, .{ + .source = new_source, + .loader = .js, + .side_effects = .has_side_effects, + }); + try this.graph.ast.append(default_allocator, JSAst.empty); - this.graph.ast.append(bun.default_allocator, JSAst.empty) catch unreachable; - this.graph.shadow_entry_points.append(allocator, shadow) catch unreachable; - this.graph.input_files.append(bun.default_allocator, .{ - .source = .{ - .path = path, - .key_path = path, - .contents = builder.source_code_buffer.toOwnedSliceLeaky(), - .index = source_index, - }, - .loader = options.Loader.js, - .side_effects = _resolver.SideEffects.has_side_effects, - }) catch unreachable; + const task = bun.new(ServerComponentParseTask, .{ + .data = data, + .ctx = this, + .source = new_source, + }); - var task = bun.default_allocator.create(ParseTask) catch unreachable; - task.* = ParseTask{ - .ctx = this, - .path = path, - // unknown at this point: - .contents_or_fd = .{ - .contents = builder.source_code_buffer.toOwnedSliceLeaky(), - }, - .side_effects = _resolver.SideEffects.has_side_effects, - .jsx = this.bundler.options.jsx, - .source_index = source_index, - .module_type = .unknown, - .loader = options.Loader.js, - .tree_shaking = this.linker.options.tree_shaking, - .known_target = options.Target.browser, - .presolved_source_indices = builder.resolved_source_indices.items, - }; - task.task.node.next = null; - try this.graph.use_directive_entry_points.append(this.graph.allocator, js_ast.UseDirective.EntryPoint{ - .source_index = source_index.get(), - .use_directive = .@"use client", - }); + _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); - _ = @atomicRmw(usize, &this.graph.parse_pending, .Add, 1, .monotonic); - this.graph.entry_points.append(allocator, source_index) catch unreachable; - this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); - this.graph.shadow_entry_point_range.len += 1; - } + this.graph.pool.pool.schedule(ThreadPoolLib.Batch.from(&task.task)); + + return @intCast(source_index); } pub fn generateFromCLI( bundler: *ThisBundler, + kit_options: ?KitOptions, allocator: std.mem.Allocator, event_loop: EventLoop, unique_key: u64, @@ -1053,14 +1180,14 @@ pub const BundleV2 = struct { minify_duration: *u64, source_code_size: *u64, ) !std.ArrayList(options.OutputFile) { - var this = try BundleV2.init(bundler, allocator, event_loop, enable_reloading, null, null); + var this = try BundleV2.init(bundler, kit_options, allocator, event_loop, enable_reloading, null, null); this.unique_key = unique_key; if (this.bundler.log.hasErrors()) { return error.BuildFailed; } - this.graph.pool.pool.schedule(try this.enqueueEntryPoints(this.bundler.options.entry_points)); + this.graph.pool.pool.schedule(try this.enqueueEntryPoints(this.bundler.options.entry_points, &.{})); if (this.bundler.log.hasErrors()) { return error.BuildFailed; @@ -1071,19 +1198,12 @@ pub const BundleV2 = struct { minify_duration.* = @as(u64, @intCast(@divTrunc(@as(i64, @truncate(std.time.nanoTimestamp())) - @as(i64, @truncate(bun.CLI.start_time)), @as(i64, std.time.ns_per_ms)))); source_code_size.* = this.source_code_length; - if (this.graph.use_directive_entry_points.len > 0) { - if 
(this.bundler.log.hasErrors()) { - return error.BuildFailed; - } - - try this.enqueueShadowEntryPoints(); - this.waitForParse(); - } - if (this.bundler.log.hasErrors()) { return error.BuildFailed; } + try this.processServerComponentManifestFiles(); + const reachable_files = try this.findReachableFiles(); reachable_files_count.* = reachable_files.len -| 1; // - 1 for the runtime @@ -1094,7 +1214,7 @@ pub const BundleV2 = struct { const chunks = try this.linker.link( this, this.graph.entry_points.items, - this.graph.use_directive_entry_points, + this.graph.server_component_boundaries, reachable_files, unique_key, ); @@ -1264,7 +1384,7 @@ pub const BundleV2 = struct { bundler.options.entry_points = config.entry_points.keys(); bundler.options.jsx = config.jsx; bundler.options.no_macros = config.no_macros; - bundler.options.react_server_components = config.server_components.client.items.len > 0 or config.server_components.server.items.len > 0; + bundler.options.server_components = config.server_components.client.items.len > 0 or config.server_components.server.items.len > 0; bundler.options.loaders = try options.loadersFromTransformOptions(allocator, config.loaders, config.target); bundler.options.entry_naming = config.names.entry_point.data; bundler.options.chunk_naming = config.names.chunk.data; @@ -1660,7 +1780,11 @@ pub const BundleV2 = struct { this.free_list.clearAndFree(); } - pub fn runFromJSInNewThread(this: *BundleV2, entry_points: []const []const u8) !std.ArrayList(options.OutputFile) { + pub fn runFromJSInNewThread( + this: *BundleV2, + entry_points: []const []const u8, + client_entry_points: []const []const u8, + ) !std.ArrayList(options.OutputFile) { this.unique_key = std.crypto.random.int(u64); if (this.bundler.log.errors > 0) { @@ -1672,7 +1796,7 @@ pub const BundleV2 = struct { bun.Mimalloc.mi_collect(true); } - this.graph.pool.pool.schedule(try this.enqueueEntryPoints(entry_points)); + this.graph.pool.pool.schedule(try this.enqueueEntryPoints(entry_points, client_entry_points)); // We must wait for all the parse tasks to complete, even if there are errors. 
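// (graph.parse_pending was incremented once per scheduled task; waitForParse
// presumably blocks until parse completions drain that counter to zero)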
this.waitForParse(); @@ -1686,6 +1810,8 @@ pub const BundleV2 = struct { return error.BuildFailed; } + try this.processServerComponentManifestFiles(); + try this.cloneAST(); if (comptime FeatureFlags.help_catch_memory_issues) { @@ -1700,7 +1826,7 @@ pub const BundleV2 = struct { const chunks = try this.linker.link( this, this.graph.entry_points.items, - this.graph.use_directive_entry_points, + this.graph.server_component_boundaries, reachable_files, this.unique_key, ); @@ -1718,7 +1844,7 @@ pub const BundleV2 = struct { import_record: *const ImportRecord, source_file: []const u8, import_record_index: u32, - original_target: ?options.Target, + original_target: options.Target, ) bool { if (this.plugins) |plugins| { if (plugins.hasAnyMatches(&import_record.path, false)) { @@ -1737,10 +1863,10 @@ pub const BundleV2 = struct { .source_file = source_file, .import_record_index = import_record_index, .importer_source_index = source_index, - .original_target = original_target orelse this.bundler.options.target, + .original_target = original_target, }, }, - this.completion.?.js, + this.completion.?, ); resolve.dispatch(); return true; @@ -1760,7 +1886,7 @@ pub const BundleV2 = struct { }); var load = bun.default_allocator.create(JSC.API.JSBundler.Load) catch unreachable; load.* = JSC.API.JSBundler.Load.create( - this.completion.?.js, + this.completion.?, parse.source_index, parse.path.loader(&this.bundler.options.loaders) orelse options.Loader.js, parse.path, @@ -1774,6 +1900,60 @@ pub const BundleV2 = struct { return false; } + fn pathWithPrettyInitialized(this: *BundleV2, path: Fs.Path, target: options.Target) !Fs.Path { + if (path.pretty.ptr != path.text.ptr) { + // TODO(@paperdave): there is a high chance this dupe is no longer required + return path.dupeAlloc(this.graph.allocator); + } + + // TODO: outbase + var buf: bun.PathBuffer = undefined; + const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); + var path_clone = path; + // stack-allocated temporary is not leaked because dupeAlloc on the path will + // move .pretty into the heap. that function also fixes some slash issues. + if (target == .kit_server_components_ssr) { + // the SSR graph needs different pretty names or else HMR mode will + // confuse the two modules. 
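// e.g. a hypothetical `src/App.tsx` becomes `ssr:src/App.tsx` in the SSR graph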
+ path_clone.pretty = std.fmt.bufPrint(&buf, "ssr:{s}", .{rel}) catch buf[0..]; + } else { + path_clone.pretty = rel; + } + return path_clone.dupeAllocFixPretty(this.graph.allocator); + } + + fn reserveSourceIndexesForKit(this: *BundleV2) !void { + const fw = this.framework orelse return; + _ = fw.server_components orelse return; + + // Call this after the runtime has been added, but before any user entry + // points, so the Kit sources land at their reserved indexes (asserted below). + bun.assert(this.graph.input_files.len == 1); + bun.assert(this.graph.ast.len == 1); + + try this.graph.ast.ensureUnusedCapacity(this.graph.allocator, 2); + try this.graph.input_files.ensureUnusedCapacity(this.graph.allocator, 2); + + const server_source = kit.server_virtual_source; + const client_source = kit.client_virtual_source; + + this.graph.input_files.appendAssumeCapacity(.{ + .source = server_source, + .loader = .js, + .side_effects = .no_side_effects__pure_data, + }); + this.graph.input_files.appendAssumeCapacity(.{ + .source = client_source, + .loader = .js, + .side_effects = .no_side_effects__pure_data, + }); + + bun.assert(this.graph.input_files.items(.source)[Index.kit_server_data.get()].index.get() == Index.kit_server_data.get()); + bun.assert(this.graph.input_files.items(.source)[Index.kit_client_data.get()].index.get() == Index.kit_client_data.get()); + + this.graph.ast.appendAssumeCapacity(JSAst.empty); + this.graph.ast.appendAssumeCapacity(JSAst.empty); + } + // TODO: remove ResolveQueue // // Moving this to the Bundle thread was a significant perf improvement on Linux for first builds @@ -1816,6 +1996,24 @@ continue; } + if (this.framework) |fw| if (fw.server_components != null) { + switch (ast.target.isServerSide()) { + inline else => |is_server| { + const src = if (is_server) kit.server_virtual_source else kit.client_virtual_source; + if (strings.eqlComptime(import_record.path.text, src.path.pretty)) { + if (is_server) { + this.graph.kit_referenced_server_data = true; + } else { + this.graph.kit_referenced_client_data = true; + } + import_record.path.namespace = "bun"; + import_record.source_index = src.index; + continue; + } + }, + } + }; + if (ast.target.isBun()) { if (JSC.HardcodedModule.Aliases.get(import_record.path.text, options.Target.bun)) |replacement| { import_record.path.text = replacement.path; @@ -1870,7 +2068,47 @@ continue; } - var resolve_result = this.bundler.resolver.resolve(source_dir, import_record.path.text, import_record.kind) catch |err| { + const bundler, const renderer: kit.Renderer, const target = + if (import_record.tag == .kit_resolve_to_ssr_graph) + brk: { + // TODO: consider moving this error into js_parser so it is caught more reliably + // Then we can assert(this.framework != null) + if (this.framework == null) { + this.bundler.log.addErrorFmt( + source, + import_record.range.loc, + this.graph.allocator, + "The 'bun_kit_graph' import attribute cannot be used outside of a Bun Kit bundle", + .{}, + ) catch @panic("unexpected log error"); + continue; + } + + const is_supported = this.framework.?.server_components != null and + this.framework.?.server_components.?.separate_ssr_graph; + if (!is_supported) { + this.bundler.log.addErrorFmt( + source, + import_record.range.loc, + this.graph.allocator, + "Framework does not have a separate SSR graph to put this import into", + .{}, + ) catch @panic("unexpected log error"); + continue; + } + + break :brk .{ + this.ssr_bundler, + .ssr, + .kit_server_components_ssr, + }; + } else .{ + this.bundlerForTarget(ast.target), + ast.target.kitRenderer(), + ast.target, + }; + + var resolve_result =
bundler.resolver.resolve(source_dir, import_record.path.text, import_record.kind) catch |err| { // Disable failing packages from being printed. // This may cause broken code to write. // However, doing this means we tell them all the resolve errors @@ -1884,7 +2122,7 @@ pub const BundleV2 = struct { if (!import_record.handles_import_errors) { last_error = err; if (isPackagePath(import_record.path.text)) { - if (ast.target.isWebLike() and options.ExternalModules.isNodeBuiltin(import_record.path.text)) { + if (ast.target == .browser and options.ExternalModules.isNodeBuiltin(import_record.path.text)) { addError( this.bundler.log, source, @@ -1930,7 +2168,7 @@ pub const BundleV2 = struct { // if there were errors, lets go ahead and collect them all if (last_error != null) continue; - var path: *Fs.Path = resolve_result.path() orelse { + const path: *Fs.Path = resolve_result.path() orelse { import_record.path.is_disabled = true; import_record.source_index = Index.invalid; @@ -1945,9 +2183,19 @@ pub const BundleV2 = struct { continue; } + if (this.bundler.options.kit) |dev_server| { + if (!dev_server.isFileStale(path.text, renderer)) { + import_record.source_index = Index.invalid; + // TODO(paperdave/kit): this relative can be done without a clone in most cases + const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); + import_record.path.pretty = this.graph.allocator.dupe(u8, rel) catch bun.outOfMemory(); + continue; + } + } + const hash_key = path.hashKey(); - if (this.graph.path_to_source_index_map.get(hash_key)) |id| { + if (this.pathToSourceIndexMap(target).get(hash_key)) |id| { import_record.source_index = Index.init(id); continue; } @@ -1955,16 +2203,10 @@ pub const BundleV2 = struct { const resolve_entry = resolve_queue.getOrPut(hash_key) catch bun.outOfMemory(); if (resolve_entry.found_existing) { import_record.path = resolve_entry.value_ptr.*.path; - continue; } - if (path.pretty.ptr == path.text.ptr) { - // TODO: outbase - const rel = bun.path.relativePlatform(this.bundler.fs.top_level_dir, path.text, .loose, false); - path.pretty = this.graph.allocator.dupe(u8, rel) catch bun.outOfMemory(); - } - path.* = path.dupeAllocFixPretty(this.graph.allocator) catch bun.outOfMemory(); + path.* = this.pathWithPrettyInitialized(path.*, target) catch bun.outOfMemory(); var secondary_path_to_copy: ?Fs.Path = null; if (resolve_result.path_pair.secondary) |*secondary| { @@ -1979,17 +2221,10 @@ pub const BundleV2 = struct { import_record.path = path.*; debug("created ParseTask: {s}", .{path.text}); - var resolve_task = bun.default_allocator.create(ParseTask) catch bun.outOfMemory(); - resolve_task.* = ParseTask.init(&resolve_result, null, this); - + const resolve_task = bun.default_allocator.create(ParseTask) catch bun.outOfMemory(); + resolve_task.* = ParseTask.init(&resolve_result, Index.invalid, this); resolve_task.secondary_path_for_commonjs_interop = secondary_path_to_copy; - - if (parse_result.value.success.use_directive != .none) { - resolve_task.known_target = ast.target; - } else { - resolve_task.known_target = ast.target; - } - + resolve_task.known_target = target; resolve_task.jsx.development = resolve_result.jsx.development; if (import_record.tag.loader()) |loader| { @@ -2072,51 +2307,23 @@ pub const BundleV2 = struct { ); } } - // else if (this.kit_watcher) |watcher| { - // if (empty_result.watcher_data.fd != .zero and empty_result.watcher_data.fd != bun.invalid_fd) { - // _ = watcher.addFile( - // empty_result.watcher_data.fd, - // 
input_files.items(.source)[empty_result.source_index.get()].path.text, - // bun.hash32(input_files.items(.source)[empty_result.source_index.get()].path.text), - // graph.input_files.items(.loader)[empty_result.source_index.get()], - // empty_result.watcher_data.dir_fd, - // null, - // false, - // ); - // } - // } }, .success => |*result| { result.log.cloneToWithRecycled(this.bundler.log, true) catch unreachable; - { - // to minimize contention, we add watcher here - if (this.bun_watcher) |watcher| { - if (result.watcher_data.fd != .zero and result.watcher_data.fd != bun.invalid_fd) { - _ = watcher.addFile( - result.watcher_data.fd, - result.source.path.text, - bun.hash32(result.source.path.text), - result.source.path.loader(&this.bundler.options.loaders) orelse options.Loader.file, - result.watcher_data.dir_fd, - result.watcher_data.package_json, - false, - ); - } + // to minimize contention, we add watcher on the bundling thread instead of the parsing thread. + if (this.bun_watcher) |watcher| { + if (result.watcher_data.fd != .zero and result.watcher_data.fd != bun.invalid_fd) { + _ = watcher.addFile( + result.watcher_data.fd, + result.source.path.text, + bun.hash32(result.source.path.text), + result.source.path.loader(&this.bundler.options.loaders) orelse options.Loader.file, + result.watcher_data.dir_fd, + result.watcher_data.package_json, + false, + ); } - // else if (this.kit_watcher) |watcher| { - // if (result.watcher_data.fd != .zero and result.watcher_data.fd != bun.invalid_fd) { - // _ = watcher.addFile( - // result.watcher_data.fd, - // result.source.path.text, - // bun.hash32(result.source.path.text), - // result.source.path.loader(&this.bundler.options.loaders) orelse options.Loader.file, - // result.watcher_data.dir_fd, - // result.watcher_data.package_json, - // false, - // ); - // } - // } } // Warning: this array may resize in this function call @@ -2138,17 +2345,18 @@ pub const BundleV2 = struct { var iter = resolve_queue.iterator(); + const path_to_source_index_map = this.pathToSourceIndexMap(result.ast.target); while (iter.next()) |entry| { const hash = entry.key_ptr.*; const value = entry.value_ptr.*; - var existing = graph.path_to_source_index_map.getOrPut(graph.allocator, hash) catch unreachable; + var existing = path_to_source_index_map.getOrPut(graph.allocator, hash) catch unreachable; // If the same file is imported and required, and those point to different files // Automatically rewrite it to the secondary one if (value.secondary_path_for_commonjs_interop) |secondary_path| { const secondary_hash = secondary_path.hashKey(); - if (graph.path_to_source_index_map.get(secondary_hash)) |secondary| { + if (path_to_source_index_map.get(secondary_hash)) |secondary| { existing.found_existing = true; existing.value_ptr.* = secondary; } @@ -2214,12 +2422,12 @@ pub const BundleV2 = struct { } for (import_records.slice(), 0..) 
|*record, i| { - if (graph.path_to_source_index_map.get(record.path.hashKey())) |source_index| { + if (path_to_source_index_map.get(record.path.hashKey())) |source_index| { record.source_index.value = source_index; if (getRedirectId(result.ast.redirect_import_record_index)) |compare| { if (compare == @as(u32, @truncate(i))) { - graph.path_to_source_index_map.put( + path_to_source_index_map.put( graph.allocator, result.source.path.hashKey(), source_index, @@ -2231,14 +2439,46 @@ pub const BundleV2 = struct { result.ast.import_records = import_records; graph.ast.set(result.source.index.get(), result.ast); - if (result.use_directive != .none) { - graph.use_directive_entry_points.append( + + // For files with use directives, index and prepare the other side. + if (result.use_directive != .none and + ((result.use_directive == .client) == (result.ast.target == .browser))) + { + if (result.use_directive == .server) + bun.todoPanic(@src(), "\"use server\"", .{}); + if (!this.framework.?.server_components.?.separate_ssr_graph) + bun.todoPanic(@src(), "implement 'separate_ssr_graph = false'", .{}); + + const reference_source_index = this.enqueueServerComponentGeneratedFile( + .{ .client_reference_proxy = .{ + .other_source = result.source, + .named_exports = result.ast.named_exports, + } }, + result.source, + ) catch bun.outOfMemory(); + + this.graph.path_to_source_index_map.put( graph.allocator, - .{ - .source_index = result.source.index.get(), - .use_directive = result.use_directive, - }, - ) catch unreachable; + result.source.path.hashKey(), + reference_source_index, + ) catch bun.outOfMemory(); + + var ssr_source = result.source; + ssr_source.path.pretty = ssr_source.path.text; + ssr_source.path = this.pathWithPrettyInitialized(ssr_source.path, .kit_server_components_ssr) catch bun.outOfMemory(); + const ssr_index = this.enqueueParseTask2( + ssr_source, + .tsx, + .kit_server_components_ssr, + ) catch bun.outOfMemory(); + + graph.server_component_boundaries.put( + graph.allocator, + result.source.index.get(), + result.use_directive, + reference_source_index, + ssr_index, + ) catch bun.outOfMemory(); } }, .err => |*err| { @@ -2262,18 +2502,29 @@ pub const BundleV2 = struct { }, } } + + /// To satisfy the interface from NewHotReloader() + pub fn getLoaders(vm: *BundleV2) *bun.options.Loader.HashTable { + return &vm.bundler.options.loaders; + } + + /// To satisfy the interface from NewHotReloader() + pub fn bustDirCache(vm: *BundleV2, path: []const u8) bool { + return vm.bundler.resolver.bustDirCache(path); + } }; /// Used to keep the bundle thread from spinning on Windows pub fn timerCallback(_: *bun.windows.libuv.Timer) callconv(.C) void {} -/// Used for Bun.build and Kit, as they asynchronously schedule multiple -/// bundles. To account for their respective differences, the scheduling code -/// is generalized over the Task structure. +/// Originally, kit.DevServer required a separate bundling thread, but that was +/// later removed. The bundling thread's scheduling logic is generalized over +/// the completion structure. +/// +/// CompletionStruct's interface: /// /// - `configureBundler` is used to configure `Bundler`. /// - `completeOnBundleThread` is used to tell the task that it is done. 
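/// - `log`, `result`, `bundler`, and `plugins` are fields the bundle thread
///   reads and writes directly, as seen below.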
-/// pub fn BundleThread(CompletionStruct: type) type { return struct { const Self = @This(); @@ -2385,6 +2636,7 @@ pub fn BundleThread(CompletionStruct: type) type { const this = try BundleV2.init( bundler, + null, // TODO: Kit allocator, JSC.AnyEventLoop.init(allocator), false, @@ -2392,17 +2644,9 @@ pub fn BundleThread(CompletionStruct: type) type { heap, ); - // switch (CompletionStruct) { - // bun.kit.DevServer.BundleTask => { - // this.kit_watcher = completion.route.dev.bun_watcher; - // }, - // else => {}, - // } - this.plugins = completion.plugins; this.completion = switch (CompletionStruct) { - BundleV2.JSBundleCompletionTask => .{ .js = completion }, - bun.kit.DevServer.BundleTask => .{ .kit = completion }, + BundleV2.JSBundleCompletionTask => completion, else => @compileError("Unknown completion struct: " ++ CompletionStruct), }; completion.bundler = this; @@ -2417,7 +2661,7 @@ pub fn BundleThread(CompletionStruct: type) type { } errdefer { - // Wait for wait groups to finish. There still may be + // Wait for wait groups to finish. There still may be ongoing work. this.linker.source_maps.line_offset_wait_group.wait(); this.linker.source_maps.quoted_contents_wait_group.wait(); @@ -2428,7 +2672,7 @@ pub fn BundleThread(CompletionStruct: type) type { completion.result = .{ .value = .{ - .output_files = try this.runFromJSInNewThread(bundler.options.entry_points), + .output_files = try this.runFromJSInNewThread(bundler.options.entry_points, &.{}), }, }; @@ -2441,6 +2685,7 @@ pub fn BundleThread(CompletionStruct: type) type { } const UseDirective = js_ast.UseDirective; +const ServerComponentBoundary = js_ast.ServerComponentBoundary; pub const ParseTask = struct { path: Fs.Path, @@ -2458,7 +2703,7 @@ pub const ParseTask = struct { source_index: Index = Index.invalid, task: ThreadPoolLib.Task = .{ .callback = &callback }, tree_shaking: bool = false, - known_target: ?options.Target = null, + known_target: options.Target, module_type: options.ModuleType = .unknown, emit_decorator_metadata: bool = false, ctx: *BundleV2, @@ -2470,7 +2715,7 @@ pub const ParseTask = struct { const debug = Output.scoped(.ParseTask, false); - pub fn init(resolve_result: *const _resolver.Result, source_index: ?Index, ctx: *BundleV2) ParseTask { + pub fn init(resolve_result: *const _resolver.Result, source_index: Index, ctx: *BundleV2) ParseTask { return .{ .ctx = ctx, .path = resolve_result.path_pair.primary, @@ -2482,10 +2727,11 @@ pub const ParseTask = struct { }, .side_effects = resolve_result.primary_side_effects_data, .jsx = resolve_result.jsx, - .source_index = source_index orelse Index.invalid, + .source_index = source_index, .module_type = resolve_result.module_type, .emit_decorator_metadata = resolve_result.emit_decorator_metadata, .package_version = if (resolve_result.package_json) |package_json| package_json.version else "", + .known_target = ctx.bundler.options.target, }; } @@ -2611,16 +2857,16 @@ pub const ParseTask = struct { const parse_task = ParseTask{ .ctx = undefined, .path = Fs.Path.initWithNamespace("runtime", "bun:runtime"), - .side_effects = _resolver.SideEffects.no_side_effects__pure_data, - .jsx = options.JSX.Pragma{ + .side_effects = .no_side_effects__pure_data, + .jsx = .{ .parse = false, - // .supports_react_refresh = false, }, .contents_or_fd = .{ .contents = runtime_code, }, .source_index = Index.runtime, - .loader = Loader.js, + .loader = .js, + .known_target = target, }; const source = Logger.Source{ .path = parse_task.path, @@ -2630,6 +2876,7 @@ pub const ParseTask = struct { }; 
return .{ .parse_task = parse_task, .source = source }; } + fn getRuntimeSource(target: options.Target) RuntimeSource { return switch (target) { inline else => |t| comptime getRuntimeSourceComptime(t), @@ -2639,8 +2886,9 @@ pub const ParseTask = struct { pub const Result = struct { task: EventLoop.Task, ctx: *BundleV2, + value: Value, - value: union(Tag) { + pub const Value = union(Tag) { success: Success, err: Error, empty: struct { @@ -2648,7 +2896,7 @@ pub const ParseTask = struct { watcher_data: WatcherData = .{}, }, - }, + }; const WatcherData = struct { fd: bun.StoredFileDescriptorType = .zero, @@ -2893,37 +3141,6 @@ pub const ParseTask = struct { .fd => brk: { const trace = tracer(@src(), "readFile"); defer trace.end(); - if (bundler.options.framework) |framework| { - if (framework.override_modules_hashes.len > 0) { - const package_relative_path_hash = bun.hash(file_path.pretty); - if (std.mem.indexOfScalar( - u64, - framework.override_modules_hashes, - package_relative_path_hash, - )) |index| { - const relative_path = [_]string{ - framework.resolved_dir, - framework.override_modules.values[index], - }; - const override_path = bundler.fs.absBuf( - &relative_path, - &override_file_path_buf, - ); - override_file_path_buf[override_path.len] = 0; - const override_pathZ = override_file_path_buf[0..override_path.len :0]; - debug("{s} -> {s}", .{ file_path.text, override_path }); - break :brk try resolver.caches.fs.readFileWithAllocator( - allocator, - bundler.fs, - override_pathZ, - .zero, - false, - null, - ); - } - } - } - if (strings.eqlComptime(file_path.namespace, "node")) break :brk CacheEntry{ .contents = NodeFallbackModules.contentsFromPath(file_path.text) orelse "", @@ -2978,9 +3195,7 @@ pub const ParseTask = struct { const will_close_file_descriptor = task.contents_or_fd == .fd and !entry.fd.isStdio() and - (this.ctx.bun_watcher == null - // and this.ctx.kit_watcher == null - ); + (this.ctx.bun_watcher == null); if (will_close_file_descriptor) { _ = entry.closeFD(); } @@ -2993,13 +3208,24 @@ pub const ParseTask = struct { }; step.* = .parse; - const is_empty = entry.contents.len == 0 or (entry.contents.len < 33 and strings.trim(entry.contents, " \n\r").len == 0); + const is_empty = strings.isAllWhitespace(entry.contents); - const use_directive = if (!is_empty and bundler.options.react_server_components) - UseDirective.parse(entry.contents) + const use_directive: UseDirective = if (!is_empty and bundler.options.server_components) + if (UseDirective.parse(entry.contents)) |use| + use + else + .none else .none; + if ((use_directive == .client and task.known_target != .kit_server_components_ssr) or + (bundler.options.server_components and task.known_target == .browser)) + { + bundler = this.ctx.client_bundler; + resolver = &bundler.resolver; + bun.assert(bundler.options.target == .browser); + } + var source = Logger.Source{ .path = file_path, .key_path = file_path, @@ -3008,7 +3234,11 @@ pub const ParseTask = struct { .contents_is_recycled = false, }; - const target = targetFromHashbang(entry.contents) orelse use_directive.target(task.known_target orelse bundler.options.target); + const target = (if (task.source_index.get() == 1) targetFromHashbang(entry.contents) else null) orelse + if (task.known_target == .kit_server_components_ssr) + .kit_server_components_ssr + else + bundler.options.target; var opts = js_parser.Parser.Options.init(task.jsx, loader); opts.bundle = true; @@ -3027,8 +3257,10 @@ pub const ParseTask = struct { opts.features.emit_decorator_metadata = 
bundler.options.emit_decorator_metadata; opts.features.unwrap_commonjs_packages = bundler.options.unwrap_commonjs_packages; opts.features.hot_module_reloading = bundler.options.output_format == .internal_kit_dev and !source.index.isRuntime(); - opts.features.react_fast_refresh = (bundler.options.hot_module_reloading or bundler.options.react_fast_refresh) and - loader.isJSX() and !source.path.isNodeModule(); + opts.features.react_fast_refresh = target == .browser and + bundler.options.react_fast_refresh and + loader.isJSX() and + !source.path.isNodeModule(); opts.ignore_dce_annotations = bundler.options.ignore_dce_annotations and !source.index.isRuntime(); @@ -3047,7 +3279,6 @@ task.jsx.parse = loader.isJSX(); var unique_key_for_additional_file: []const u8 = ""; - var ast: JSAst = if (!is_empty) try getAST(log, bundler, opts, allocator, resolver, source, loader, task.ctx.unique_key, &unique_key_for_additional_file) else switch (opts.module_type == .esm) { @@ -3062,6 +3293,7 @@ }; ast.target = target; + if (ast.parts.len <= 1) { task.side_effects = .no_side_effects__empty_ast; } @@ -3075,11 +3307,7 @@ } } - // never a react client component if RSC is not enabled. - bun.assert(use_directive == .none or bundler.options.react_server_components); - step.* = .resolve; - ast.target = target; return Result.Success{ .ast = ast, @@ -3182,6 +3410,171 @@ } }; +/// Files for Server Components are generated using `AstBuilder`, instead of +/// running through the js_parser. It emits a ParseTask.Result and joins +/// with the same completion logic that regular ParseTask results run through. +pub const ServerComponentParseTask = struct { + task: ThreadPoolLib.Task = .{ .callback = &taskCallbackWrap }, + data: Data, + ctx: *BundleV2, + source: Logger.Source, + + pub const Data = union(enum) { + /// Generate server-side code for a "use client" module. Given the + /// client ast, a "reference proxy" is created with identical exports.
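+        ///
+        /// Roughly, the generated proxy looks like this (illustrative sketch,
+        /// not verbatim output; the runtime import path and register function
+        /// come from the framework's `server_components` config, and `Comp`
+        /// stands in for each named export of the client module):
+        ///
+        ///     import { registerClientReference } from "<server runtime>";
+        ///     export const Comp = registerClientReference(
+        ///       () => { throw new Error("...") },
+        ///       "src/Comp.tsx",
+        ///       "Comp",
+        ///     );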
+ client_reference_proxy: ReferenceProxy, + + pub const ReferenceProxy = struct { + other_source: Logger.Source, + named_exports: JSAst.NamedExports, + }; + }; + + fn taskCallbackWrap(thread_pool_task: *ThreadPoolLib.Task) void { + const task: *ServerComponentParseTask = @fieldParentPtr("task", thread_pool_task); + var worker = ThreadPool.Worker.get(task.ctx); + defer worker.unget(); + var log = Logger.Log.init(worker.allocator); + + const result = bun.default_allocator.create(ParseTask.Result) catch bun.outOfMemory(); + result.* = .{ + .ctx = task.ctx, + .task = undefined, + + .value = if (taskCallback( + task, + &log, + worker.allocator, + )) |success| + .{ .success = success } + else |err| brk: { + break :brk .{ .err = .{ + .err = err, + .step = .resolve, + .log = log, + } }; + }, + }; + + switch (worker.ctx.loop().*) { + .js => |jsc_event_loop| { + jsc_event_loop.enqueueTaskConcurrent(JSC.ConcurrentTask.fromCallback(result, ParseTask.onComplete)); + }, + .mini => |*mini| { + mini.enqueueTaskConcurrentWithExtraCtx( + ParseTask.Result, + BundleV2, + result, + BundleV2.onParseTaskComplete, + .task, + ); + }, + } + } + + fn taskCallback( + task: *ServerComponentParseTask, + log: *Logger.Log, + allocator: std.mem.Allocator, + ) !ParseTask.Result.Success { + var ab = try AstBuilder.init(allocator, &task.source, task.ctx.bundler.options.hot_module_reloading); + + try switch (task.data) { + .client_reference_proxy => |data| task.generateClientReferenceProxy(data, &ab), + }; + + var ast = try ab.toBundledAst(); + ast.target = switch (task.data) { + // Server-side + .client_reference_proxy => task.ctx.bundler.options.target, + }; + + return .{ + .ast = ast, + .source = task.source, + .log = log.*, + }; + } + + fn generateClientReferenceProxy(task: *ServerComponentParseTask, data: Data.ReferenceProxy, b: *AstBuilder) !void { + const server_components = task.ctx.framework.?.server_components orelse + unreachable; // config must be non-null to enter this function + + const client_named_exports = data.named_exports; + + const register_client_reference = (try b.addImportStmt( + server_components.server_runtime_import, + &.{server_components.server_register_client_reference}, + ))[0]; + + const module_path = b.newExpr(E.String{ .data = data.other_source.path.pretty }); + + for (client_named_exports.keys()) |key| { + const export_ref = try b.newSymbol(.other, key); + + const is_default = bun.strings.eqlComptime(key, "default"); + + // This error message is taken from + // https://github.com/facebook/react/blob/c5b9375767e2c4102d7e5559d383523736f1c902/packages/react-server-dom-webpack/src/ReactFlightWebpackNodeLoader.js#L323-L354 + const err_msg_string = try if (is_default) + std.fmt.allocPrint( + b.allocator, + "Attempted to call the default export of {[module_path]s} from " ++ + "the server, but it's on the client. It's not possible to invoke a " ++ + "client function from the server, it can only be rendered as a " ++ + "Component or passed to props of a Client Component.", + .{ .module_path = data.other_source.path.pretty }, + ) + else + std.fmt.allocPrint( + b.allocator, + "Attempted to call {[key]s}() from the server but {[key]s} " ++ + "is on the client. It's not possible to invoke a client function from " ++ + "the server, it can only be rendered as a Component or passed to " ++ + "props of a Client Component.", + .{ .key = key }, + ); + + // throw new Error(...) 
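+            // (`Error` is created with `newExternalSymbol`, which marks the
+            // symbol `must_not_be_renamed`, so the generated code always
+            // refers to the global `Error` constructor.)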
+ const err_msg = b.newExpr(E.New{ + .target = b.newExpr(E.Identifier{ + .ref = try b.newExternalSymbol("Error"), + }), + .args = try BabyList(Expr).fromSlice(b.allocator, &.{ + b.newExpr(E.String{ .data = err_msg_string }), + }), + .close_parens_loc = Logger.Loc.Empty, + }); + + // export const Comp = registerClientReference( + // () => { throw new Error(...) }, + // "src/filepath.tsx", + // "Comp" + // ); + try b.appendStmt(S.Local{ + .decls = try G.Decl.List.fromSlice(b.allocator, &.{.{ + .binding = Binding.alloc(b.allocator, B.Identifier{ .ref = export_ref }, Logger.Loc.Empty), + .value = b.newExpr(E.Call{ + .target = register_client_reference, + .args = try js_ast.ExprNodeList.fromSlice(b.allocator, &.{ + b.newExpr(E.Arrow{ .body = .{ + .stmts = try b.allocator.dupe(Stmt, &.{ + b.newStmt(S.Throw{ .value = err_msg }), + }), + .loc = Logger.Loc.Empty, + } }), + module_path, + b.newExpr(E.String{ .data = key }), + }), + }), + }}), + .is_export = true, + .kind = .k_const, + }); + } + } +}; + const IdentityContext = @import("../identity_context.zig").IdentityContext; const RefVoidMap = std.ArrayHashMapUnmanaged(Ref, void, Ref.ArrayHashCtx, false); @@ -3191,9 +3584,9 @@ const ResolvedExports = bun.StringArrayHashMapUnmanaged(ExportData); const TopLevelSymbolToParts = js_ast.Ast.TopLevelSymbolToParts; pub const WrapKind = enum(u2) { - none = 0, - cjs = 1, - esm = 2, + none, + cjs, + esm, }; pub const ImportData = struct { @@ -3355,33 +3748,68 @@ pub const JSMeta = struct { }; pub const Graph = struct { - entry_points: std.ArrayListUnmanaged(Index) = .{}, - ast: MultiArrayList(JSAst) = .{}, - - input_files: InputFile.List = .{}, - + // TODO: consider removing references to this in favor of bundler.options.code_splitting code_splitting: bool = false, - pool: *ThreadPool = undefined, - - heap: ThreadlocalArena = ThreadlocalArena{}, - /// Main thread only!! + pool: *ThreadPool, + heap: ThreadlocalArena = .{}, + /// This allocator is thread-local to the Bundler thread allocator: std.mem.Allocator = undefined, + /// Mapping user-specified entry points to their Source Index + entry_points: std.ArrayListUnmanaged(Index) = .{}, + /// Every source index has an associated InputFile + input_files: MultiArrayList(InputFile) = .{}, + /// Every source index has an associated Ast + /// When a parse is in progress / queued, it is `Ast.empty` + ast: MultiArrayList(JSAst) = .{}, + + // During the scan + parse phase, these atomics keep track + // of the remaining tasks. Once they hit zero, linking begins. + // + // TODO: these use atomicRmw across the codebase, but it seems at a glance + // that each usage is on the main thread. if that is not true, convert this + // to use std.atomic.Value instead. also consider merging the two, and also + // using u32, since Ref does not support addressing sources above maxInt(u31) parse_pending: usize = 0, resolve_pending: usize = 0, - /// Stable source index mapping - source_index_map: std.AutoArrayHashMapUnmanaged(Index.Int, Ref.Int) = .{}, + /// Maps a hashed path string to a source index, if it exists in the compilation. + /// Instead of accessing this directly, consider using BundleV2.pathToSourceIndexMap path_to_source_index_map: PathToSourceIndexMap = .{}, + /// When using server components, a completely separate file listing is + /// required to avoid incorrect inlining of defines and dependencies on + /// other files. This is relevant for files that are shared between server + /// and client but have no "use " directive; such files must be duplicated.
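+    /// (For example, a `formatDate.ts` helper imported by both a server
+    /// route and a "use client" component gets a copy in each graph, so each
+    /// side sees the correct defines; the file name here is hypothetical.)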
+ /// + /// To make linking easier, this second graph contains indices into the + /// same `.ast` and `.input_files` arrays. + client_path_to_source_index_map: PathToSourceIndexMap = .{}, + /// When using server components with React, there is an additional module + /// graph which is used to contain SSR-versions of all client components; + /// the SSR graph. The difference between the SSR graph and the server + /// graph is that this one does not apply '--conditions react-server' + /// + /// In Bun's React Framework, it includes SSR versions of 'react' and + /// 'react-dom' (an export condition is used to provide a different + /// implementation for RSC, which is potentially how they implement + /// server-only features such as async components). + ssr_path_to_source_index_map: PathToSourceIndexMap = .{}, + + /// When Server Components is enabled, this holds a list of all boundary + /// files. This happens for all files with a "use " directive. + server_component_boundaries: ServerComponentBoundary.List = .{}, - use_directive_entry_points: UseDirective.List = .{}, + // TODO: this has no reason to be using logger.Range + shadow_entry_point_range: Logger.Range = Logger.Range.None, + // TODO: document what makes this estimate not perfect estimated_file_loader_count: usize = 0, additional_output_files: std.ArrayListUnmanaged(options.OutputFile) = .{}, - shadow_entry_point_range: Logger.Range = Logger.Range.None, - shadow_entry_points: std.ArrayListUnmanaged(ShadowEntryPoint) = .{}, + + kit_referenced_server_data: bool, + kit_referenced_client_data: bool, pub const InputFile = struct { source: Logger.Source, @@ -3390,8 +3818,6 @@ pub const Graph = struct { additional_files: BabyList(AdditionalFile) = .{}, unique_key_for_additional_file: string = "", content_hash_for_additional_file: u64 = 0, - - pub const List = MultiArrayList(InputFile); }; }; @@ -3452,14 +3878,15 @@ const EntryPoint = struct { return this == .user_specified or this == .react_server_component; } + // TODO: Rename this to isServerComponentBoundary pub fn isReactReference(this: Kind) bool { return this == .react_client_component or this == .react_server_component; } pub fn useDirective(this: Kind) UseDirective { return switch (this) { - .react_client_component => .@"use client", - .react_server_component => .@"use server", + .react_client_component => .client, + .react_server_component => .server, else => .none, }; } @@ -3474,10 +3901,6 @@ const AstSourceIDMapping = struct { const LinkerGraph = struct { const debug = Output.scoped(.LinkerGraph, false); - /// TODO(@paperdave): remove this. i added it before realizing this is available - /// via LinkerContext.parse_graph. it may also be worth removing the other cloned data. 
- bundler_graph: *const Graph, - files: File.List = .{}, files_live: BitSet = undefined, entry_points: EntryPoint.List = .{}, @@ -3523,13 +3946,13 @@ const LinkerGraph = struct { pub fn useDirectiveBoundary(this: *const LinkerGraph, source_index: Index.Int) UseDirective { if (this.react_client_component_boundary.bit_length > 0) { if (this.react_client_component_boundary.isSet(source_index)) { - return .@"use client"; + return .client; } } if (this.react_server_component_boundary.bit_length > 0) { if (this.react_server_component_boundary.isSet(source_index)) { - return .@"use server"; + return .server; } } @@ -3646,6 +4069,7 @@ const LinkerGraph = struct { return part_id; } + pub fn generateSymbolImportAndUse( g: *LinkerGraph, source_index: u32, @@ -3722,10 +4146,11 @@ const LinkerGraph = struct { this: *LinkerGraph, entry_points: []const Index, sources: []const Logger.Source, - use_directive_entry_points: UseDirective.List, + server_component_boundaries: ServerComponentBoundary.List, dynamic_import_entry_points: []const Index.Int, shadow_entry_point_range: Logger.Range, ) !void { + const scb = server_component_boundaries.slice(); try this.files.setCapacity(this.allocator, sources.len); this.files.zero(); this.files_live = try BitSet.initEmpty( @@ -3743,7 +4168,7 @@ const LinkerGraph = struct { // Setup entry points { - try this.entry_points.setCapacity(this.allocator, entry_points.len + use_directive_entry_points.len + dynamic_import_entry_points.len); + try this.entry_points.setCapacity(this.allocator, entry_points.len + server_component_boundaries.list.len + dynamic_import_entry_points.len); this.entry_points.len = entry_points.len; const source_indices = this.entry_points.items(.source_index); @@ -3786,20 +4211,24 @@ const LinkerGraph = struct { this.meta.len = this.ast.len; this.meta.zero(); - if (use_directive_entry_points.len > 0) { + if (server_component_boundaries.list.len > 0) { this.react_client_component_boundary = BitSet.initEmpty(this.allocator, this.files.len) catch unreachable; this.react_server_component_boundary = BitSet.initEmpty(this.allocator, this.files.len) catch unreachable; var any_server = false; var any_client = false; // Loop #1: populate the list of files that are react client components - for (use_directive_entry_points.items(.use_directive), use_directive_entry_points.items(.source_index)) |use, source_id| { - if (use == .@"use client") { - any_client = true; - this.react_client_component_boundary.set(source_id); - } else if (use == .@"use server") { - any_server = true; - this.react_server_component_boundary.set(source_id); + for (scb.list.items(.use_directive), scb.list.items(.source_index)) |use, source_id| { + switch (use) { + .none => {}, + .client => { + any_client = true; + this.react_client_component_boundary.set(source_id); + }, + .server => { + any_server = true; + this.react_server_component_boundary.set(source_id); + }, } } @@ -3808,6 +4237,8 @@ const LinkerGraph = struct { for (this.reachable_files) |source_id| { const use_directive = this.useDirectiveBoundary(source_id.get()); const source_i32 = @as(i32, @intCast(source_id.get())); + + // TODO(paperdave/kit): i am not sure if this logic is correct const is_shadow_entrypoint = shadow_entry_point_range.contains(source_i32); // If the reachable file has a "use client"; at the top @@ -3821,20 +4252,27 @@ const LinkerGraph = struct { const other = this.useDirectiveBoundary(source_index); if (use_directive.boundering(other)) |boundary| { - - // That import is a React Server Component reference. 
+ // That import is a Server Component reference. switch (boundary) { - .@"use client" => { + .client => { if (!is_shadow_entrypoint) { - const pretty = sources[source_index].path.pretty; - import_record.module_id = bun.hash32(pretty); - import_record.tag = .react_client_component; - import_record.path.namespace = "client"; - import_record.print_namespace_in_path = true; - import_record.source_index = Index.invalid; + // const pretty = sources[source_index].path.pretty; + // import_record.module_id = bun.hash32(pretty); + // import_record.tag = .react_client_component; + // import_record.path.namespace = "client"; + // import_record.print_namespace_in_path = true; + import_record.source_index = Index.init( + scb.getReferenceSourceIndex( + source_index, + ) orelse unreachable, // file didn't have a boundary + ); + bun.assert(import_record.source_index.isValid()); // did not generate } }, - .@"use server" => { + .server => { + { + bun.todoPanic(@src(), "\"use server\"", .{}); + } import_record.module_id = bun.hash32(sources[source_index].path.pretty); import_record.tag = .react_server_component; import_record.path.namespace = "server"; @@ -3852,7 +4290,6 @@ const LinkerGraph = struct { entry_point_kinds[source_index] = .react_server_component; } }, - else => unreachable, } } } @@ -3877,11 +4314,9 @@ const LinkerGraph = struct { stable_source_indices[source_index.get()] = Index.source(i); } - const file = LinkerGraph.File{}; - // TODO: verify this outputs efficient code @memset( files.items(.distance_from_entry_point), - file.distance_from_entry_point, + (LinkerGraph.File{}).distance_from_entry_point, ); this.stable_source_indices = @as([]const u32, @ptrCast(stable_source_indices)); } @@ -3937,16 +4372,11 @@ const LinkerGraph = struct { var resolved = ResolvedExports{}; resolved.ensureTotalCapacity(this.allocator, src.count()) catch unreachable; for (src.keys(), src.values()) |key, value| { - resolved.putAssumeCapacityNoClobber( - key, - .{ - .data = .{ - .import_ref = value.ref, - .name_loc = value.alias_loc, - .source_index = Index.source(source_index), - }, - }, - ); + resolved.putAssumeCapacityNoClobber(key, .{ .data = .{ + .import_ref = value.ref, + .name_loc = value.alias_loc, + .source_index = Index.source(source_index), + } }); } dest.* = resolved; } @@ -4024,6 +4454,10 @@ pub const LinkerContext = struct { /// to know whether or not we can free it safely. 
pending_task_count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), + /// Used by Kit to extract []CompileResult before it is joined + kit_dev_server: ?*bun.kit.DevServer = null, + framework: ?*const kit.Framework = null, + pub const LinkerOptions = struct { output_format: options.Format = .esm, ignore_dce_annotations: bool = false, @@ -4035,7 +4469,7 @@ pub const LinkerContext = struct { source_maps: options.SourceMapOption = .none, target: options.Target = .browser, - mode: Mode = Mode.bundle, + mode: Mode = .bundle, public_path: []const u8 = "", @@ -4151,7 +4585,7 @@ pub const LinkerContext = struct { this: *LinkerContext, bundle: *BundleV2, entry_points: []Index, - use_directive_entry_points: UseDirective.List, + server_component_boundaries: ServerComponentBoundary.List, reachable: []Index, ) !void { const trace = tracer(@src(), "CloneLinkerGraph"); @@ -4168,7 +4602,7 @@ pub const LinkerContext = struct { const sources: []const Logger.Source = this.parse_graph.input_files.items(.source); - try this.graph.load(entry_points, sources, use_directive_entry_points, bundle.dynamic_import_entry_points.keys(), bundle.graph.shadow_entry_point_range); + try this.graph.load(entry_points, sources, server_component_boundaries, bundle.dynamic_import_entry_points.keys(), bundle.graph.shadow_entry_point_range); bundle.dynamic_import_entry_points.deinit(); this.wait_group.init(); this.ambiguous_result_pool = std.ArrayList(MatchImport).init(this.allocator); @@ -4226,14 +4660,14 @@ pub const LinkerContext = struct { this: *LinkerContext, bundle: *BundleV2, entry_points: []Index, - use_directive_entry_points: UseDirective.List, + server_component_boundaries: ServerComponentBoundary.List, reachable: []Index, unique_key: u64, ) ![]Chunk { try this.load( bundle, entry_points, - use_directive_entry_points, + server_component_boundaries, reachable, ); @@ -4518,7 +4952,7 @@ pub const LinkerContext = struct { pub fn visit( v: *@This(), source_index: Index.Int, - comptime with_react_server_components: UseDirective.Flags, + comptime with_server_components: UseDirective.Flags, comptime with_code_splitting: bool, ) void { if (source_index == Index.invalid.value) return; @@ -4532,12 +4966,12 @@ pub const LinkerContext = struct { // when NOT code splitting, include the file in the chunk if ANY of the entry points overlap v.entry_bits.hasIntersection(&v.c.graph.files.items(.entry_bits)[source_index]); - if (comptime with_react_server_components.is_client or with_react_server_components.is_server) { + if (with_server_components.has_any_client or with_server_components.has_any_server) { if (is_file_in_chunk and v.entry_point.is_entry_point and v.entry_point.source_index != source_index) { - if (comptime with_react_server_components.is_client) { + if (with_server_components.has_any_client) { if (v.c.graph.react_client_component_boundary.isSet(source_index)) { if (!v.c.graph.react_client_component_boundary.isSet(v.entry_point.source_index)) { return; @@ -4545,7 +4979,7 @@ pub const LinkerContext = struct { } } - if (comptime with_react_server_components.is_server) { + if (with_server_components.has_any_server) { if (v.c.graph.react_server_component_boundary.isSet(source_index)) { if (!v.c.graph.react_server_component_boundary.isSet(v.entry_point.source_index)) { return; @@ -4576,7 +5010,7 @@ pub const LinkerContext = struct { continue; } - v.visit(record.source_index.get(), with_react_server_components, with_code_splitting); + v.visit(record.source_index.get(), with_server_components, with_code_splitting); } } @@ 
-4643,8 +5077,8 @@ pub const LinkerContext = struct { visitor.visit( Index.runtime.value, .{ - .is_server = with_server, - .is_client = with_client, + .has_any_server = with_server, + .has_any_client = with_client, }, with_code_splitting, ); @@ -4652,8 +5086,8 @@ visitor.visit( order.source_index, .{ - .is_server = with_server, - .is_client = with_client, + .has_any_server = with_server, + .has_any_client = with_client, }, with_code_splitting, ); @@ -6134,6 +6568,51 @@ pub const LinkerContext = struct { entry_point_kinds, ); } + + // When using server components with a separated SSR graph, these + // components are not required to be referenced; the framework may + // use a dynamic import to get a handle to them. + if (c.framework) |fw| if (fw.server_components) |sc| { + if (sc.separate_ssr_graph) { + const slice = c.parse_graph.server_component_boundaries.list.slice(); + for (slice.items(.use_directive), slice.items(.ssr_source_index)) |use, ssr_source_index| { + switch (use) { + .client => { + c.markFileLiveForTreeShaking( + ssr_source_index, + side_effects, + parts, + import_records, + entry_point_kinds, + ); + }, + .server => bun.todoPanic(@src(), "rewire hot-bundling code", .{}), + else => unreachable, + } + } + } + + // TODO: this is a workaround for a missing tree-shaking + // annotation wrt these generated segments + if (c.parse_graph.kit_referenced_server_data) { + c.markFileLiveForTreeShaking( + Index.kit_server_data.get(), + side_effects, + parts, + import_records, + entry_point_kinds, + ); + } + if (c.parse_graph.kit_referenced_client_data) { + c.markFileLiveForTreeShaking( + Index.kit_client_data.get(), + side_effects, + parts, + import_records, + entry_point_kinds, + ); + } + }; } { @@ -6165,6 +6644,27 @@ pub const LinkerContext = struct { import_records, file_entry_bits, ); + + if (c.framework) |fw| if (fw.server_components) |sc| if (sc.separate_ssr_graph) { + const slice = c.parse_graph.server_component_boundaries.list.slice(); + for (slice.items(.use_directive), slice.items(.ssr_source_index)) |use, ssr_source_index| { + switch (use) { + .client => { + c.markFileReachableForCodeSplitting( + ssr_source_index, + i, + distances, + 0, + parts, + import_records, + file_entry_bits, + ); + }, + .server => bun.todoPanic(@src(), "rewire hot-bundling code", .{}), + else => unreachable, + } + } + }; } } } @@ -6377,18 +6877,6 @@ pub const LinkerContext = struct { if (other_chunk_index == chunk_index or other_chunk.content != .javascript) continue; if (other_chunk.entry_bits.isSet(chunk.entry_point.entry_point_id)) { - if (other_chunk.entry_point.is_entry_point) { - if (c.graph.react_client_component_boundary.bit_length > 0 or c.graph.react_server_component_boundary.bit_length > 0) { - const other_kind = c.graph.files.items(.entry_point_kind)[other_chunk.entry_point.source_index]; - const this_kind = c.graph.files.items(.entry_point_kind)[chunk.entry_point.source_index]; - - if (this_kind != .react_client_component and - other_kind.isReactReference()) - { - continue; - } - } - } _ = js.imports_from_other_chunks.getOrPutValue( c.allocator, @as(u32, @truncate(other_chunk_index)), @@ -6913,7 +7401,15 @@ pub const LinkerContext = struct { const trace = tracer(@src(), "generateCodeForFileInChunkJS"); defer trace.end(); + // Client bundles for Kit must be globally allocated, + // as they must outlive the bundle task.
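+        // (The DevServer keeps these compile results around so unchanged
+        // parts can be reused for hot updates; see the `receiveChunk` call
+        // later in this file.)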
+ const use_global_allocator = c.kit_dev_server != null and + c.parse_graph.ast.items(.target)[part_range.source_index.get()].kitRenderer() == .client; + var arena = &worker.temporary_arena; - var buffer_writer = js_printer.BufferWriter.init(worker.allocator) catch unreachable; + var buffer_writer = js_printer.BufferWriter.init( + if (use_global_allocator) default_allocator else worker.allocator, + ) catch bun.outOfMemory(); defer _ = arena.reset(.retain_capacity); worker.stmt_list.reset(); @@ -7251,10 +7746,9 @@ pub const LinkerContext = struct { } { const input = c.parse_graph.input_files.items(.source)[chunk.entry_point.source_index].path; - // var buf = MutableString.initEmpty(c.allocator); - // js_printer.quoteForJSONBuffer(input.pretty, &buf, true) catch bun.outOfMemory(); - // const str = buf.toOwnedSliceLeaky(); // c.allocator is an arena - const str = try std.fmt.allocPrint(c.allocator, "{d}", .{input.hashForKit()}); + var buf = MutableString.initEmpty(worker.allocator); + js_printer.quoteForJSONBuffer(input.pretty, &buf, true) catch bun.outOfMemory(); + const str = buf.toOwnedSliceLeaky(); // worker.allocator is an arena j.pushStatic(str); line_offset.advance(str); } @@ -8078,9 +8572,6 @@ pub const LinkerContext = struct { ast: *const JSAst, ) !bool { const record = ast.import_records.at(import_record_index); - if (record.tag.isReactReference()) - return false; - // Is this an external import? if (!record.source_index.isValid()) { // Keep the "import" statement if import statements are supported @@ -8731,7 +9222,7 @@ pub const LinkerContext = struct { allocator: std.mem.Allocator, ast: *const JSAst, ) !void { - _ = source_index; // autofix + _ = source_index; // may be used const receiver_args = try allocator.dupe(G.Arg, &.{ .{ .binding = Binding.alloc(allocator, B.Identifier{ .ref = ast.module_ref }, Logger.Loc.Empty) }, @@ -8745,10 +9236,8 @@ pub const LinkerContext = struct { .s_local => |st| { // TODO: check if this local is immediately assigned // `require()` if so, we will instrument it with hot module - // reloading. other cases of `require` won't receive receive - // updates. - _ = st; // autofix - + // reloading. other cases of `require` won't receive updates. + _ = st; try stmts.inside_wrapper_suffix.append(stmt); }, .s_import => |st| { @@ -8758,31 +9247,32 @@ pub const LinkerContext = struct { // automatically, instead of with bundler-added // annotations like '__commonJS'. // - // this is not done in the parse step because the final + // this cannot be done in the parse step because the final // pretty path is not yet known. the other statement types // are not handled here because some of those generate // new local variables (it is too late to do that here). 
const record = ast.import_records.at(st.import_record_index); - const path = c.parse_graph.input_files.items(.source)[record.source_index.get()].path; + const path = if (record.source_index.isValid()) + c.parse_graph.input_files.items(.source)[record.source_index.get()].path + else + record.path; + const is_builtin = record.tag == .builtin or record.tag == .bun_test or record.tag == .bun; const is_bare_import = st.star_name_loc == null and st.items.len == 0 and st.default_name == null; - const key_expr = Expr.init(E.InlinedEnum, .{ - .comment = path.pretty, - .value = Expr.init(E.Number, .{ - .value = @floatFromInt(path.hashForKit()), - }, stmt.loc), + const key_expr = Expr.init(E.String, .{ + .data = path.pretty, }, stmt.loc); // module.importSync('path', (module) => ns = module) const call = Expr.init(E.Call, .{ .target = Expr.init(E.Dot, .{ .target = module_id, - .name = "importSync", + .name = if (is_builtin) "importBuiltin" else "importSync", .name_loc = stmt.loc, }, stmt.loc), .args = js_ast.ExprNodeList.init( - try allocator.dupe(Expr, if (is_bare_import) + try allocator.dupe(Expr, if (is_bare_import or is_builtin) &.{key_expr} else &.{ @@ -9044,6 +9534,16 @@ // Turn each module into a function if this is Kit var stmt_storage: Stmt = undefined; if (c.options.output_format == .internal_kit_dev and !part_range.source_index.isRuntime()) { + if (stmts.all_stmts.items.len == 0) { + // TODO: these chunks should just not exist in the first place + // they seem to happen on the entry point? or JSX? not clear + // removing the chunk in the parser breaks the liveness analysis. + // + // The workaround is to end early on empty files, and filter out + // empty files later. + return .{ .result = .{ .code = "", .source_map = null } }; + } + var clousure_args = std.BoundedArray(G.Arg, 2).fromSlice(&.{ .{ .binding = Binding.alloc(temp_allocator, B.Identifier{ .ref = ast.module_ref, @@ -9533,8 +10033,8 @@ for (chunk.content.javascript.parts_in_chunk_in_order, 0..) |part_range, i| { remaining_part_ranges[0] = .{ .part_range = part_range, - .i = @as(u32, @truncate(i)), - .task = ThreadPoolLib.Task{ + .i = @truncate(i), + .task = .{ .callback = &generateCompileResultForJSChunk, }, .ctx = chunk_ctx, @@ -9557,6 +10057,36 @@ c.source_maps.quoted_contents_tasks.len = 0; } + // When kit.DevServer is in use, we're going to take a different code path at the end. + // We want to extract the source code of each part instead of combining it into a single file. + // This is so that when hot-module updates happen, we can: + // + // - Reuse unchanged parts to assemble the full bundle if Cmd+R is used in the browser + // - Send only the newly changed code through a socket. + // + // When this isn't the initial bundle, the data we would get concatenating + // everything here would be useless.
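+        //
+        // (In this mode no OutputFiles are produced: each part's compiled
+        // code is handed to the DevServer via `receiveChunk` and an empty
+        // list is returned below.)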
+ if (c.kit_dev_server) |dev_server| { + const input_file_sources = c.parse_graph.input_files.items(.source); + const targets = c.parse_graph.ast.items(.target); + for (chunks) |chunk| { + for ( + chunk.content.javascript.parts_in_chunk_in_order, + chunk.compile_results_for_chunk, + ) |part_range, compile_result| { + try dev_server.receiveChunk( + input_file_sources[part_range.source_index.get()].path.text, + targets[part_range.source_index.get()].kitRenderer(), + compile_result, + ); + } + } + + // kit.main_path = default_allocator.dupe(u8, c.parse_graph.input_files.items(.source)[chunks[0].entry_point.source_index].path.pretty) catch bun.outOfMemory(); + + return std.ArrayList(options.OutputFile).init(bun.default_allocator); + } + { debug(" START {d} postprocess chunks", .{chunks.len}); defer debug(" DONE {d} postprocess chunks", .{chunks.len}); @@ -9666,152 +10196,10 @@ pub const LinkerContext = struct { } } - const react_client_components_manifest: []u8 = if (c.resolver.opts.react_server_components) brk: { - var bytes = std.ArrayList(u8).init(c.allocator); - defer bytes.deinit(); - const all_sources = c.parse_graph.input_files.items(.source); - var all_named_exports = c.graph.ast.items(.named_exports); - var export_names = std.ArrayList(Api.StringPointer).init(c.allocator); - defer export_names.deinit(); - - var client_modules = std.ArrayList(Api.ClientServerModule).initCapacity(c.allocator, c.graph.react_client_component_boundary.count()) catch unreachable; - defer client_modules.deinit(); - var server_modules = std.ArrayList(Api.ClientServerModule).initCapacity(c.allocator, c.graph.react_server_component_boundary.count()) catch unreachable; - defer server_modules.deinit(); - - var react_client_components_iterator = c.graph.react_client_component_boundary.iterator(.{}); - var react_server_components_iterator = c.graph.react_server_component_boundary.iterator(.{}); - - var sorted_client_component_ids = std.ArrayList(u32).initCapacity(c.allocator, client_modules.capacity) catch unreachable; - defer sorted_client_component_ids.deinit(); - while (react_client_components_iterator.next()) |source_index| { - if (!c.graph.files_live.isSet(source_index)) continue; - sorted_client_component_ids.appendAssumeCapacity(@as(u32, @intCast(source_index))); - } - - var sorted_server_component_ids = std.ArrayList(u32).initCapacity(c.allocator, server_modules.capacity) catch unreachable; - defer sorted_server_component_ids.deinit(); - while (react_server_components_iterator.next()) |source_index| { - if (!c.graph.files_live.isSet(source_index)) continue; - sorted_server_component_ids.appendAssumeCapacity(@as(u32, @intCast(source_index))); - } - - const Sorter = struct { - sources: []const Logger.Source, - pub fn isLessThan(ctx: @This(), a_index: u32, b_index: u32) bool { - const a = ctx.sources[a_index].path.text; - const b = ctx.sources[b_index].path.text; - return strings.order(a, b) == .lt; - } - }; - std.sort.pdq(u32, sorted_client_component_ids.items, Sorter{ .sources = all_sources }, Sorter.isLessThan); - std.sort.pdq(u32, sorted_server_component_ids.items, Sorter{ .sources = all_sources }, Sorter.isLessThan); - - inline for (.{ - sorted_client_component_ids.items, - sorted_server_component_ids.items, - }, .{ - &client_modules, - &server_modules, - }) |sorted_component_ids, modules| { - for (sorted_component_ids) |component_source_index| { - var source_index_for_named_exports = component_source_index; - - const chunk: *Chunk = brk2: { - for (chunks) |*chunk_| { - if 
(!chunk_.entry_point.is_entry_point) continue; - if (chunk_.entry_point.source_index == @as(u32, @intCast(component_source_index))) { - break :brk2 chunk_; - } - - if (chunk_.files_with_parts_in_chunk.contains(component_source_index)) { - source_index_for_named_exports = chunk_.entry_point.source_index; - break :brk2 chunk_; - } - } - - @panic("could not find chunk for component"); - }; - - var grow_length: usize = 0; - - const named_exports = all_named_exports[source_index_for_named_exports].keys(); - - try export_names.ensureUnusedCapacity(named_exports.len); - const exports_len = @as(u32, @intCast(named_exports.len)); - const exports_start = @as(u32, @intCast(export_names.items.len)); - - grow_length += chunk.final_rel_path.len; - - grow_length += all_sources[component_source_index].path.pretty.len; - - for (named_exports) |export_name| { - try export_names.append(Api.StringPointer{ - .offset = @as(u32, @intCast(bytes.items.len + grow_length)), - .length = @as(u32, @intCast(export_name.len)), - }); - grow_length += export_name.len; - } - - try bytes.ensureUnusedCapacity(grow_length); - - const input_name = Api.StringPointer{ - .offset = @as(u32, @intCast(bytes.items.len)), - .length = @as(u32, @intCast(all_sources[component_source_index].path.pretty.len)), - }; - - bytes.appendSliceAssumeCapacity(all_sources[component_source_index].path.pretty); - - const asset_name = Api.StringPointer{ - .offset = @as(u32, @intCast(bytes.items.len)), - .length = @as(u32, @intCast(chunk.final_rel_path.len)), - }; - - bytes.appendSliceAssumeCapacity(chunk.final_rel_path); - - for (named_exports) |export_name| { - bytes.appendSliceAssumeCapacity(export_name); - } - - modules.appendAssumeCapacity(.{ - .module_id = bun.hash32(all_sources[component_source_index].path.pretty), - .asset_name = asset_name, - .input_name = input_name, - .export_names = .{ - .length = exports_len, - .offset = exports_start, - }, - }); - } - } - - if (client_modules.items.len == 0 and server_modules.items.len == 0) break :brk &.{}; - - var manifest = Api.ClientServerModuleManifest{ - .version = 2, - .client_modules = client_modules.items, - - // TODO: - .ssr_modules = client_modules.items, - - .server_modules = server_modules.items, - .export_names = export_names.items, - .contents = bytes.items, - }; - var byte_buffer = std.ArrayList(u8).initCapacity(bun.default_allocator, bytes.items.len) catch unreachable; - var byte_buffer_writer = byte_buffer.writer(); - const SchemaWriter = schema.Writer(@TypeOf(&byte_buffer_writer)); - var writer = SchemaWriter.init(&byte_buffer_writer); - manifest.encode(&writer) catch unreachable; - break :brk byte_buffer.items; - } else &.{}; - var output_files = std.ArrayList(options.OutputFile).initCapacity( bun.default_allocator, - (if (c.options.source_maps.hasExternalFiles()) chunks.len * 2 else chunks.len) + @as( - usize, - @intFromBool(react_client_components_manifest.len > 0) + c.parse_graph.additional_output_files.items.len, - ), + (if (c.options.source_maps.hasExternalFiles()) chunks.len * 2 else chunks.len) + + @as(usize, c.parse_graph.additional_output_files.items.len), ) catch unreachable; const root_path = c.resolver.opts.output_dir; @@ -9822,7 +10210,7 @@ pub const LinkerContext = struct { } if (root_path.len > 0) { - try c.writeOutputFilesToDisk(root_path, chunks, react_client_components_manifest, &output_files); + try c.writeOutputFilesToDisk(root_path, chunks, &output_files); } else { // In-memory build @@ -9946,25 +10334,6 @@ pub const LinkerContext = struct { } } - if 
(react_client_components_manifest.len > 0) { - output_files.appendAssumeCapacity(options.OutputFile.init( - .{ - .data = .{ - .buffer = .{ - .data = react_client_components_manifest, - .allocator = bun.default_allocator, - }, - }, - - .input_path = try bun.default_allocator.dupe(u8, components_manifest_path), - .output_path = try bun.default_allocator.dupe(u8, components_manifest_path), - .loader = .file, - .input_loader = .file, - .output_kind = .@"component-manifest", - }, - )); - } - output_files.appendSliceAssumeCapacity(c.parse_graph.additional_output_files.items); } @@ -10006,11 +10375,11 @@ pub const LinkerContext = struct { if (strings.eqlComptime(from_chunk_dir, ".")) from_chunk_dir = ""; - const additional_files: []AdditionalFile = c.graph.bundler_graph.input_files.items(.additional_files)[piece.index.index].slice(); + const additional_files: []AdditionalFile = c.parse_graph.input_files.items(.additional_files)[piece.index.index].slice(); bun.assert(additional_files.len > 0); switch (additional_files[0]) { .output_file => |output_file_id| { - const path = c.graph.bundler_graph.additional_output_files.items[output_file_id].dest_path; + const path = c.parse_graph.additional_output_files.items[output_file_id].dest_path; hash.write(bun.path.relativePlatform(from_chunk_dir, path, .posix, false)); }, .source_index => {}, @@ -10028,7 +10397,6 @@ pub const LinkerContext = struct { c: *LinkerContext, root_path: string, chunks: []Chunk, - react_client_components_manifest: []const u8, output_files: *std.ArrayList(options.OutputFile), ) !void { const trace = tracer(@src(), "writeOutputFilesToDisk"); @@ -10269,57 +10637,57 @@ pub const LinkerContext = struct { } } - if (react_client_components_manifest.len > 0) { - switch (JSC.Node.NodeFS.writeFileWithPathBuffer( - &pathbuf, - JSC.Node.Arguments.WriteFile{ - .data = JSC.Node.StringOrBuffer{ - .buffer = JSC.Buffer{ - .buffer = .{ - .ptr = @constCast(react_client_components_manifest.ptr), - // TODO: handle > 4 GB files - .len = @as(u32, @truncate(react_client_components_manifest.len)), - .byte_len = @as(u32, @truncate(react_client_components_manifest.len)), - }, - }, - }, - .encoding = .buffer, - .dirfd = bun.toFD(root_dir.fd), - .file = .{ - .path = JSC.Node.PathLike{ - .string = JSC.PathString.init(components_manifest_path), - }, - }, - }, - )) { - .err => |err| { - const utf8 = err.toSystemError().message.toUTF8(bun.default_allocator); - defer utf8.deinit(); - c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{} writing chunk {}", .{ - bun.fmt.quote(utf8.slice()), - bun.fmt.quote(components_manifest_path), - }) catch unreachable; - return error.WriteFailed; - }, - .result => {}, - } + // if (react_client_components_manifest.len > 0) { + // switch (JSC.Node.NodeFS.writeFileWithPathBuffer( + // &pathbuf, + // JSC.Node.Arguments.WriteFile{ + // .data = JSC.Node.StringOrBuffer{ + // .buffer = JSC.Buffer{ + // .buffer = .{ + // .ptr = @constCast(react_client_components_manifest.ptr), + // // TODO: handle > 4 GB files + // .len = @as(u32, @truncate(react_client_components_manifest.len)), + // .byte_len = @as(u32, @truncate(react_client_components_manifest.len)), + // }, + // }, + // }, + // .encoding = .buffer, + // .dirfd = bun.toFD(root_dir.fd), + // .file = .{ + // .path = JSC.Node.PathLike{ + // .string = JSC.PathString.init(components_manifest_path), + // }, + // }, + // }, + // )) { + // .err => |err| { + // const utf8 = err.toSystemError().message.toUTF8(bun.default_allocator); + // defer utf8.deinit(); + // 
c.log.addErrorFmt(null, Logger.Loc.Empty, bun.default_allocator, "{} writing chunk {}", .{ + // bun.fmt.quote(utf8.slice()), + // bun.fmt.quote(components_manifest_path), + // }) catch unreachable; + // return error.WriteFailed; + // }, + // .result => {}, + // } - output_files.appendAssumeCapacity( - options.OutputFile.init( - options.OutputFile.Options{ - .data = .{ - .saved = 0, - }, - .loader = .file, - .input_loader = .file, - .output_kind = .@"component-manifest", - .size = @as(u32, @truncate(react_client_components_manifest.len)), - .input_path = bun.default_allocator.dupe(u8, components_manifest_path) catch unreachable, - .output_path = bun.default_allocator.dupe(u8, components_manifest_path) catch unreachable, - }, - ), - ); - } + // output_files.appendAssumeCapacity( + // options.OutputFile.init( + // options.OutputFile.Options{ + // .data = .{ + // .saved = 0, + // }, + // .loader = .file, + // .input_loader = .file, + // .output_kind = .@"component-manifest", + // .size = @as(u32, @truncate(react_client_components_manifest.len)), + // .input_path = bun.default_allocator.dupe(u8, components_manifest_path) catch unreachable, + // .output_path = bun.default_allocator.dupe(u8, components_manifest_path) catch unreachable, + // }, + // ), + // ); + // } { const offset = output_files.items.len; @@ -10453,13 +10821,9 @@ pub const LinkerContext = struct { ); // TODO: CSS AST - var imports_a_boundary = false; - const use_directive = c.graph.useDirectiveBoundary(source_index); for (import_records[source_index].slice()) |*record| { - const is_boundary = use_directive.isBoundary(record.tag.useDirective()); - imports_a_boundary = use_directive != .none and (imports_a_boundary or is_boundary); - if (record.source_index.isValid() and !is_boundary and !c.isExternalDynamicImport(record, source_index)) { + if (record.source_index.isValid() and !c.isExternalDynamicImport(record, source_index)) { c.markFileReachableForCodeSplitting( record.source_index.get(), entry_points_count, @@ -10476,12 +10840,6 @@ pub const LinkerContext = struct { for (parts_in_file) |part| { for (part.dependencies.slice()) |dependency| { if (dependency.source_index.get() != source_index) { - if (imports_a_boundary and - // "use client" -> "use server" imports don't - use_directive.isBoundary(c.graph.files.items(.entry_point_kind)[dependency.source_index.get()] - .useDirective())) - continue; - c.markFileReachableForCodeSplitting( dependency.source_index.get(), entry_points_count, @@ -10507,7 +10865,7 @@ pub const LinkerContext = struct { if (comptime bun.Environment.allow_assert) { debugTreeShake("markFileLiveForTreeShaking({d}, {s}) = {s}", .{ source_index, - c.parse_graph.input_files.get(source_index).source.path.text, + c.parse_graph.input_files.get(source_index).source.path.pretty, if (c.graph.files_live.isSet(source_index)) "seen" else "not seen", }); } @@ -10615,7 +10973,7 @@ pub const LinkerContext = struct { if (comptime bun.Environment.isDebug) { debugTreeShake("markPartLiveForTreeShaking({d}): {s}:{d} = {d}, {s}", .{ source_index, - c.parse_graph.input_files.get(source_index).source.path.text, + c.parse_graph.input_files.get(source_index).source.path.pretty, part_index, if (part.stmts.len > 0) part.stmts[0].loc.start else Logger.Loc.Empty.start, if (part.stmts.len > 0) @tagName(part.stmts[0].data) else @tagName(Stmt.empty().data), @@ -11223,7 +11581,7 @@ pub const LinkerContext = struct { imports_to_bind: *RefImportData, source_index: Index.Int, ) void { - var named_imports = 
named_imports_ptr.cloneWithAllocator(c.allocator) catch unreachable; + var named_imports = named_imports_ptr.clone(c.allocator) catch bun.outOfMemory(); defer named_imports_ptr.* = named_imports; const Sorter = struct { @@ -11248,13 +11606,10 @@ pub const LinkerContext = struct { const import_ref = ref; var re_exports = std.ArrayList(js_ast.Dependency).init(c.allocator); - const result = c.matchImportWithExport( - .{ - .source_index = Index.source(source_index), - .import_ref = import_ref, - }, - &re_exports, - ); + const result = c.matchImportWithExport(.{ + .source_index = Index.source(source_index), + .import_ref = import_ref, + }, &re_exports); switch (result.kind) { .normal => { @@ -12124,7 +12479,7 @@ pub const CrossChunkImport = struct { } }; -const CompileResult = union(enum) { +pub const CompileResult = union(enum) { javascript: struct { source_index: Index.Int, result: js_printer.PrintResult, @@ -12240,113 +12595,6 @@ fn cheapPrefixNormalizer(prefix: []const u8, suffix: []const u8) [2]string { }; } -const components_manifest_path = "./components-manifest.blob"; - -// For Server Components, we generate an entry point which re-exports all client components -// This is a "shadow" of the server entry point. -// The client is expected to import this shadow entry point -const ShadowEntryPoint = struct { - from_source_index: Index.Int, - to_source_index: Index.Int, - - named_exports: bun.BabyList(NamedExport) = .{}, - - pub const NamedExport = struct { - // TODO: packed string - from: string, - to: string, - source_index: Index.Int, - }; - - pub const Builder = struct { - source_code_buffer: MutableString, - ctx: *BundleV2, - resolved_source_indices: std.ArrayList(Index.Int), - shadow: *ShadowEntryPoint, - - pub fn addClientComponent( - this: *ShadowEntryPoint.Builder, - source_index: usize, - ) void { - var writer = this.source_code_buffer.writer(); - const path = this.ctx.graph.input_files.items(.source)[source_index].path; - // TODO: tree-shaking to named imports only - writer.print( - \\// {s} - \\import {} from '${d}'; - \\export {}; - \\ - , - .{ - path.pretty, - ImportsFormatter{ .ctx = this.ctx, .source_index = @as(Index.Int, @intCast(source_index)), .pretty = path.pretty }, - bun.fmt.hexIntUpper(bun.hash(path.pretty)), - ExportsFormatter{ .ctx = this.ctx, .source_index = @as(Index.Int, @intCast(source_index)), .pretty = path.pretty, .shadow = this.shadow }, - }, - ) catch unreachable; - this.resolved_source_indices.append(@as(Index.Int, @truncate(source_index))) catch unreachable; - } - }; - const ImportsFormatter = struct { - ctx: *BundleV2, - pretty: string, - source_index: Index.Int, - pub fn format(self: ImportsFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { - var this = self.ctx; - const named_exports: *js_ast.Ast.NamedExports = &this.graph.ast.items(.named_exports)[self.source_index]; - try writer.writeAll("{"); - for (named_exports.keys()) |*named| { - named.* = try std.fmt.allocPrint( - this.graph.allocator, - "${}_{s}", - .{ - bun.fmt.hexIntLower(bun.hash(self.pretty)), - named.*, - }, - ); - } - try named_exports.reIndex(); - - for (named_exports.keys(), 0..) 
|name, i| { - try writer.writeAll(name); - if (i < named_exports.count() - 1) { - try writer.writeAll(" , "); - } - } - try writer.writeAll("}"); - } - }; - - const ExportsFormatter = struct { - ctx: *BundleV2, - pretty: string, - source_index: Index.Int, - shadow: *ShadowEntryPoint, - pub fn format(self: ExportsFormatter, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { - var this = self.ctx; - const named_exports: js_ast.Ast.NamedExports = this.graph.ast.items(.named_exports)[self.source_index]; - try writer.writeAll("{"); - var shadow = self.shadow; - try shadow.named_exports.ensureUnusedCapacity(this.graph.allocator, named_exports.count()); - const last = named_exports.count() - 1; - for (named_exports.keys(), 0..) |name, i| { - try shadow.named_exports.push(this.graph.allocator, .{ - .from = name, - .to = name, - .source_index = self.source_index, - }); - - try writer.writeAll(name); - - if (i < last) { - try writer.writeAll(" , "); - } - } - try writer.writeAll("}"); - } - }; -}; - fn getRedirectId(id: u32) ?u32 { if (id == std.math.maxInt(u32)) { return null; @@ -12368,3 +12616,334 @@ fn targetFromHashbang(buffer: []const u8) ?options.Target { return null; } + +/// Utility to construct `Ast`s intended for generated code, such as the +/// boundary modules when dealing with server components. This is a saner +/// alternative to building a string, then sending it through `js_parser` +/// +/// For in-depth details on the fields, most of these are documented +/// inside of `js_parser` +pub const AstBuilder = struct { + allocator: std.mem.Allocator, + source: *const Logger.Source, + source_index: u31, + stmts: std.ArrayListUnmanaged(Stmt), + scopes: std.ArrayListUnmanaged(*Scope), + symbols: std.ArrayListUnmanaged(Symbol), + import_records: std.ArrayListUnmanaged(ImportRecord), + named_imports: js_ast.Ast.NamedImports, + named_exports: js_ast.Ast.NamedExports, + import_records_for_current_part: std.ArrayListUnmanaged(u32), + export_star_import_records: std.ArrayListUnmanaged(u32), + current_scope: *Scope, + log: Logger.Log, + module_ref: Ref, + declared_symbols: js_ast.DeclaredSymbol.List, + /// When set, codegen is altered + hot_reloading: bool, + + // stub fields for ImportScanner duck typing + comptime options: js_parser.Parser.Options = .{ + .jsx = .{}, + .bundle = true, + }, + comptime import_items_for_namespace: struct { + pub fn get(_: @This(), _: Ref) ?js_parser.ImportItemForNamespaceMap { + return null; + } + } = .{}, + pub const parser_features = struct { + pub const typescript = false; + }; + + pub fn init(allocator: std.mem.Allocator, source: *const Logger.Source, hot_reloading: bool) !AstBuilder { + const scope = try allocator.create(Scope); + scope.* = .{ + .kind = .entry, + .label_ref = null, + .parent = null, + .generated = .{}, + }; + var ab: AstBuilder = .{ + .allocator = allocator, + .current_scope = scope, + .source = source, + .source_index = @intCast(source.index.get()), + .stmts = .{}, + .scopes = .{}, + .symbols = .{}, + .import_records = .{}, + .import_records_for_current_part = .{}, + .named_imports = .{}, + .named_exports = .{}, + .log = Logger.Log.init(allocator), + .export_star_import_records = .{}, + .module_ref = Ref.None, + .declared_symbols = .{}, + .hot_reloading = hot_reloading, + }; + ab.module_ref = try ab.newSymbol(.other, "module"); + return ab; + } + + pub fn pushScope(p: *AstBuilder, kind: Scope.Kind) *js_ast.Scope { + try p.scopes.ensureUnusedCapacity(p.allocator, 1); + try 
p.current_scope.children.ensureUnusedCapacity(p.allocator, 1); + const scope = try p.allocator.create(Scope); + scope.* = .{ + .kind = kind, + .label_ref = null, + .parent = p.current_scope, + .generated = .{}, + }; + p.current_scope.children.appendAssumeCapacity(scope); + p.scopes.appendAssumeCapacity(p.current_scope); + p.current_scope = scope; + return scope; + } + + pub fn popScope(p: *AstBuilder) void { + p.current_scope = p.scopes.pop(); + } + + pub fn newSymbol(p: *AstBuilder, kind: Symbol.Kind, identifier: []const u8) !Ref { + const inner_index: Ref.Int = @intCast(p.symbols.items.len); + try p.symbols.append(p.allocator, .{ + .kind = kind, + .original_name = identifier, + .debug_mode_source_index = if (Environment.allow_assert) @intCast(p.source_index) else 0, + }); + const ref: Ref = .{ + .inner_index = inner_index, + .source_index = p.source_index, + .tag = .symbol, + }; + try p.current_scope.generated.push(p.allocator, ref); + return ref; + } + + pub fn getSymbol(p: *AstBuilder, ref: Ref) *Symbol { + bun.assert(ref.source_index == p.source.index.get()); + return &p.symbols.items[ref.inner_index]; + } + + pub fn addImportRecord(p: *AstBuilder, path: []const u8, kind: ImportKind) !u32 { + const index = p.import_records.items.len; + try p.import_records.append(p.allocator, .{ + .path = bun.fs.Path.init(path), + .kind = kind, + .range = .{}, + }); + return @intCast(index); + } + + pub fn addImportStmt( + p: *AstBuilder, + path: []const u8, + identifiers_to_import: anytype, + ) ![identifiers_to_import.len]Expr { + var out: [identifiers_to_import.len]Expr = undefined; + + const record = try p.addImportRecord(path, .stmt); + + var path_name = bun.fs.PathName.init(path); + const name = try strings.append(p.allocator, "import_", try path_name.nonUniqueNameString(p.allocator)); + const namespace_ref = try p.newSymbol(.other, name); + + const clauses = try p.allocator.alloc(js_ast.ClauseItem, identifiers_to_import.len); + + inline for (identifiers_to_import, &out, clauses) |import_id_untyped, *out_ref, *clause| { + const import_id: []const u8 = import_id_untyped; // must be given '[N][]const u8' + const ref = try p.newSymbol(.import, import_id); + if (p.hot_reloading) { + p.getSymbol(ref).namespace_alias = .{ + .namespace_ref = namespace_ref, + .alias = import_id, + .import_record_index = record, + }; + } + out_ref.* = p.newExpr(E.ImportIdentifier{ .ref = ref }); + clause.* = .{ + .name = .{ .loc = Logger.Loc.Empty, .ref = ref }, + .original_name = import_id, + .alias = import_id, + }; + } + + try p.appendStmt(S.Import{ + .namespace_ref = namespace_ref, + .import_record_index = record, + .items = clauses, + .is_single_line = identifiers_to_import.len < 1, + }); + + return out; + } + + pub fn appendStmt(p: *AstBuilder, data: anytype) !void { + try p.stmts.ensureUnusedCapacity(p.allocator, 1); + p.stmts.appendAssumeCapacity(p.newStmt(data)); + } + + pub fn newStmt(p: *AstBuilder, data: anytype) Stmt { + _ = p; + return Stmt.alloc(@TypeOf(data), data, Logger.Loc.Empty); + } + + pub fn newExpr(p: *AstBuilder, data: anytype) Expr { + _ = p; + return Expr.init(@TypeOf(data), data, Logger.Loc.Empty); + } + + pub fn newExternalSymbol(p: *AstBuilder, name: []const u8) !Ref { + const ref = try p.newSymbol(.other, name); + const sym = p.getSymbol(ref); + sym.must_not_be_renamed = true; + return ref; + } + + pub fn toBundledAst(p: *AstBuilder) !js_ast.BundledAst { + // TODO: missing import scanner + bun.assert(p.scopes.items.len == 0); + const module_scope = p.current_scope; + + var parts = try 
js_ast.Part.List.initCapacity(p.allocator, 2); + parts.len = 2; + parts.mut(0).* = .{}; + parts.mut(1).* = .{ + .stmts = p.stmts.items, + .can_be_removed_if_unused = false, + + // pretend that every symbol was used + .symbol_uses = uses: { + var map: js_ast.Part.SymbolUseMap = .{}; + try map.ensureTotalCapacity(p.allocator, p.symbols.items.len); + for (0..p.symbols.items.len) |i| { + map.putAssumeCapacity(Ref{ + .tag = .symbol, + .source_index = p.source_index, + .inner_index = @intCast(i), + }, .{ .count_estimate = 1 }); + } + break :uses map; + }, + }; + + const single_u32 = try BabyList(u32).fromSlice(p.allocator, &.{1}); + + var top_level_symbols_to_parts = js_ast.Ast.TopLevelSymbolToParts{}; + try top_level_symbols_to_parts.entries.setCapacity(p.allocator, module_scope.generated.len); + top_level_symbols_to_parts.entries.len = module_scope.generated.len; + const slice = top_level_symbols_to_parts.entries.slice(); + for ( + slice.items(.key), + slice.items(.value), + module_scope.generated.slice(), + ) |*k, *v, ref| { + k.* = ref; + v.* = single_u32; + } + try top_level_symbols_to_parts.reIndex(p.allocator); + + // For more details on this section, look at js_parser.toAST + // This is mimicking how it calls ImportScanner + if (p.hot_reloading) { + var hmr_transform_ctx = js_parser.ConvertESMExportsForHmr{ + .last_part = parts.last() orelse + unreachable, // was definitely allocated + }; + try hmr_transform_ctx.stmts.ensureTotalCapacity(p.allocator, prealloc_count: { + // get an estimate on how many statements there are going to be + const count = p.stmts.items.len; + break :prealloc_count count + 2; + }); + + _ = try js_parser.ImportScanner.scan(AstBuilder, p, p.stmts.items, false, true, &hmr_transform_ctx); + + const new_parts = try hmr_transform_ctx.finalize(p, parts.slice()); + // preserve original capacity + parts.len = @intCast(new_parts.len); + bun.assert(new_parts.ptr == parts.ptr); + } else { + const result = try js_parser.ImportScanner.scan(AstBuilder, p, p.stmts.items, false, false, {}); + parts.mut(1).stmts = result.stmts; + } + + parts.mut(1).declared_symbols = p.declared_symbols; + parts.mut(1).scopes = p.scopes.items; + parts.mut(1).import_record_indices = BabyList(u32).fromList(p.import_records_for_current_part); + + return .{ + .parts = parts, + .module_scope = module_scope.*, + .symbols = js_ast.Symbol.List.fromList(p.symbols), + .exports_ref = Ref.None, + .wrapper_ref = Ref.None, + .module_ref = p.module_ref, + .import_records = ImportRecord.List.fromList(p.import_records), + .export_star_import_records = &.{}, + .approximate_newline_count = 1, + .exports_kind = .esm, + .named_imports = p.named_imports, + .named_exports = p.named_exports, + .top_level_symbols_to_parts = top_level_symbols_to_parts, + .char_freq = .{}, + .flags = .{}, + // .nested_scope_slot_counts = if (p.options.features.minify_identifiers) + // renamer.assignNestedScopeSlots(p.allocator, p.scopes.items[0], p.symbols.items) + // else + // js_ast.SlotCounts{}, + }; + } + + // stub methods for ImportScanner duck typing + + pub fn generateTempRef(ab: *AstBuilder, name: ?[]const u8) Ref { + return ab.newSymbol(.other, name orelse "temp") catch bun.outOfMemory(); + } + + pub fn recordExport(p: *AstBuilder, _: Logger.Loc, alias: []const u8, ref: Ref) !void { + if (p.named_exports.get(alias)) |_| { + // Duplicate exports are an error + Output.panic( + "In generated file, duplicate export \"{s}\"", + .{alias}, + ); + } else { + try p.named_exports.put(p.allocator, alias, .{ .alias_loc = Logger.Loc.Empty, .ref
= ref }); + } + } + + pub fn recordExportedBinding(p: *AstBuilder, binding: Binding) void { + switch (binding.data) { + .b_missing => {}, + .b_identifier => |ident| { + p.recordExport(binding.loc, p.symbols.items[ident.ref.innerIndex()].original_name, ident.ref) catch unreachable; + }, + .b_array => |array| { + for (array.items) |prop| { + p.recordExportedBinding(prop.binding); + } + }, + .b_object => |obj| { + for (obj.properties) |prop| { + p.recordExportedBinding(prop.value); + } + }, + } + } + + pub fn ignoreUsage(p: *AstBuilder, ref: Ref) void { + _ = p; + _ = ref; + } + + pub fn panic(p: *AstBuilder, comptime fmt: []const u8, args: anytype) noreturn { + _ = p; + Output.panic(fmt, args); + } + + pub fn @"module.exports"(p: *AstBuilder, loc: Logger.Loc) Expr { + return p.newExpr(E.Dot{ .name = "exports", .name_loc = loc, .target = p.newExpr(E.Identifier{ .ref = p.module_ref }) }); + } +}; diff --git a/src/bunfig.zig b/src/bunfig.zig index bccc9aa750b35..6ad52eac40a07 100644 --- a/src/bunfig.zig +++ b/src/bunfig.zig @@ -684,20 +684,6 @@ pub const Bunfig = struct { jsx.development = jsx_dev; } - switch (comptime cmd) { - .AutoCommand, .BuildCommand => { - if (json.get("publicDir")) |public_dir| { - try this.expectString(public_dir); - this.bunfig.router = Api.RouteConfig{ - .extensions = &.{}, - .dir = &.{}, - .static_dir = try public_dir.data.e_string.string(allocator), - }; - } - }, - else => {}, - } - if (json.get("debug")) |expr| { if (expr.get("editor")) |editor| { if (editor.asString(allocator)) |value| { @@ -738,13 +724,6 @@ pub const Bunfig = struct { } } - if (json.get("framework")) |expr| { - try this.expectString(expr); - this.bunfig.framework = Api.FrameworkConfig{ - .package = expr.asString(allocator).?, - }; - } - if (json.get("loader")) |expr| { try this.expect(expr, .e_object); const properties = expr.data.e_object.properties.slice(); diff --git a/src/cli.zig b/src/cli.zig index ad59352479eb1..67b44bdfbb775 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -253,7 +253,8 @@ pub const Arguments = struct { clap.parseParam("--chunk-naming Customize chunk filenames. Defaults to \"[name]-[hash].[ext]\"") catch unreachable, clap.parseParam("--asset-naming Customize asset filenames. Defaults to \"[name]-[hash].[ext]\"") catch unreachable, clap.parseParam("--react-fast-refresh Enable React Fast Refresh transform (does not emit hot-module code, use this for testing)") catch unreachable, - clap.parseParam("--server-components Enable React Server Components (experimental)") catch unreachable, + clap.parseParam("--server-components Enable Server Components (experimental)") catch unreachable, + clap.parseParam("--define-client ... When --server-components is set, these defines are applied to client components. Same format as --define") catch unreachable, clap.parseParam("--no-bundle Transpile file only, do not bundle") catch unreachable, clap.parseParam("--emit-dce-annotations Re-emit DCE annotations in bundles. 
Enabled by default unless --minify-whitespace is passed.") catch unreachable, clap.parseParam("--minify Enable all minification flags") catch unreachable, @@ -856,7 +857,19 @@ pub const Arguments = struct { } if (args.flag("--server-components")) { - ctx.bundler_options.react_server_components = true; + if (!bun.FeatureFlags.cli_server_components) { + // TODO: I want to disable this in non-canary + // but I also want to have tests that can run for PRs + } + ctx.bundler_options.server_components = true; + if (opts.target) |target| { + if (!bun.options.Target.from(target).isServerSide()) { + bun.Output.errGeneric("Cannot use client-side --target={s} with --server-components", .{@tagName(target)}); + Global.crash(); + } + } else { + opts.target = .bun; + } } if (args.flag("--react-fast-refresh")) { @@ -986,7 +999,7 @@ pub const Arguments = struct { } if (cmd == .BuildCommand) { - if (opts.entry_points.len == 0 and opts.framework == null) { + if (opts.entry_points.len == 0) { Output.prettyErrorln("bun build v" ++ Global.package_json_version_with_sha ++ "", .{}); Output.prettyError("error: Missing entrypoints. What would you like to bundle?\n\n", .{}); Output.flush(); @@ -1320,7 +1333,7 @@ pub const Command = struct { entry_naming: []const u8 = "[dir]/[name].[ext]", chunk_naming: []const u8 = "./[name]-[hash].[ext]", asset_naming: []const u8 = "./[name]-[hash].[ext]", - react_server_components: bool = false, + server_components: bool = false, react_fast_refresh: bool = false, code_splitting: bool = false, transform_only: bool = false, diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig index f5bb2afd6c141..5630ee9b14950 100644 --- a/src/cli/build_command.zig +++ b/src/cli/build_command.zig @@ -30,9 +30,7 @@ const bundler = bun.bundler; const DotEnv = @import("../env_loader.zig"); const fs = @import("../fs.zig"); -const Router = @import("../router.zig"); const BundleV2 = @import("../bundler/bundle_v2.zig").BundleV2; -var estimated_input_lines_of_code_: usize = undefined; pub const BuildCommand = struct { const compile_define_keys = &.{ "process.platform", "process.arch", }; - pub fn exec( - ctx: Command.Context, - ) !void { + pub fn exec(ctx: Command.Context) !void { Global.configureAllocator(.{ .long_running = true }); const allocator = ctx.allocator; var log = ctx.log; - estimated_input_lines_of_code_ = 0; if (ctx.bundler_options.compile) { // set this early so that externals are set up correctly and define is right ctx.args.target = .bun; } @@ -56,12 +51,7 @@ pub const BuildCommand = struct { if (ctx.bundler_options.compile) { const compile_define_values = compile_target.defineValues(); - if (ctx.args.define == null) { - ctx.args.define = .{ - .keys = compile_define_keys, - .values = compile_define_values, - }; - } else if (ctx.args.define) |*define| { + if (ctx.args.define) |*define| { var keys = try std.ArrayList(string).initCapacity(bun.default_allocator, compile_define_keys.len + define.keys.len); keys.appendSliceAssumeCapacity(compile_define_keys); keys.appendSliceAssumeCapacity(define.keys); @@ -71,6 +61,11 @@ pub const BuildCommand = struct { define.keys = keys.items; define.values = values.items; + } else { + ctx.args.define = .{ + .keys = compile_define_keys, + .values = compile_define_values, + }; + } } @@ -85,13 +80,14 @@ pub const BuildCommand = struct { Global.exit(1); return; } + var outfile = ctx.bundler_options.outfile; this_bundler.options.public_path = ctx.bundler_options.public_path; this_bundler.options.entry_naming =
ctx.bundler_options.entry_naming; this_bundler.options.chunk_naming = ctx.bundler_options.chunk_naming; this_bundler.options.asset_naming = ctx.bundler_options.asset_naming; - this_bundler.options.react_server_components = ctx.bundler_options.react_server_components; + this_bundler.options.server_components = ctx.bundler_options.server_components; this_bundler.options.react_fast_refresh = ctx.bundler_options.react_fast_refresh; this_bundler.options.inline_entrypoint_import_meta_main = ctx.bundler_options.inline_entrypoint_import_meta_main; this_bundler.options.code_splitting = ctx.bundler_options.code_splitting; @@ -100,6 +96,12 @@ pub const BuildCommand = struct { this_bundler.options.minify_identifiers = ctx.bundler_options.minify_identifiers; this_bundler.options.emit_dce_annotations = ctx.bundler_options.emit_dce_annotations; this_bundler.options.ignore_dce_annotations = ctx.bundler_options.ignore_dce_annotations; + this_bundler.options.output_dir = ctx.bundler_options.outdir; + this_bundler.options.output_format = ctx.bundler_options.output_format; + + if (ctx.bundler_options.output_format == .internal_kit_dev) { + this_bundler.options.tree_shaking = false; + } if (ctx.bundler_options.compile) { if (ctx.bundler_options.code_splitting) { @@ -161,9 +163,6 @@ pub const BuildCommand = struct { } } - this_bundler.options.output_dir = ctx.bundler_options.outdir; - this_bundler.options.output_format = ctx.bundler_options.output_format; - var src_root_dir_buf: bun.PathBuffer = undefined; const src_root_dir: string = brk1: { const path = brk2: { @@ -194,28 +193,10 @@ pub const BuildCommand = struct { this_bundler.options.code_splitting = ctx.bundler_options.code_splitting; this_bundler.options.transform_only = ctx.bundler_options.transform_only; - if (this_bundler.options.transform_only) { - this_bundler.options.resolve_mode = .disable; - } - - this_bundler.resolver.opts = this_bundler.options; - + try this_bundler.configureDefines(); this_bundler.configureLinker(); - // This step is optional - // If it fails for any reason, ignore it and continue bundling - // This is partially a workaround for the 'error.MissingRoutesDir' error - this_bundler.configureRouter(true) catch { - this_bundler.options.routes.routes_enabled = false; - this_bundler.options.framework = null; - if (this_bundler.router) |*router| { - router.config.routes_enabled = false; - router.config.single_page_app_routing = false; - router.config.static_dir_enabled = false; - this_bundler.router = null; - } - }; - + this_bundler.resolver.opts = this_bundler.options; this_bundler.options.jsx.development = !this_bundler.options.production; this_bundler.resolver.opts.jsx.development = this_bundler.options.jsx.development; @@ -229,6 +210,37 @@ pub const BuildCommand = struct { .unspecified => {}, } + var client_bundler: bundler.Bundler = undefined; + if (this_bundler.options.server_components) { + client_bundler = try bundler.Bundler.init(allocator, log, ctx.args, null); + client_bundler.options = this_bundler.options; + client_bundler.options.target = .browser; + client_bundler.options.server_components = true; + try this_bundler.options.conditions.appendSlice(&.{"react-server"}); + this_bundler.options.react_fast_refresh = false; + this_bundler.options.minify_syntax = true; + client_bundler.options.minify_syntax = true; + client_bundler.options.define = try options.Define.init( + allocator, + if (ctx.args.define) |user_defines| + try options.Define.Data.fromInput(try options.stringHashMapFromArrays( + options.defines.RawDefines, + 
allocator, + user_defines.keys, + user_defines.values, + ), log, allocator) + else + null, + null, + ); + + try bun.kit.addImportMetaDefines(allocator, this_bundler.options.define, .development, .server); + try bun.kit.addImportMetaDefines(allocator, client_bundler.options.define, .development, .client); + + this_bundler.resolver.opts = this_bundler.options; + client_bundler.resolver.opts = client_bundler.options; + } + // var env_loader = this_bundler.env; if (ctx.debug.dump_environment_variables) { @@ -268,6 +280,7 @@ pub const BuildCommand = struct { break :brk (BundleV2.generateFromCLI( &this_bundler, + if (this_bundler.options.server_components) @panic("TODO") else null, allocator, bun.JSC.AnyEventLoop.init(ctx.allocator), std.crypto.random.int(u64), @@ -284,7 +297,6 @@ pub const BuildCommand = struct { Output.flush(); exitOrWatch(1, ctx.debug.hot_reload == .watch); - unreachable; }).items; }; const bundled_end = std.time.nanoTimestamp(); @@ -519,7 +531,7 @@ pub const BuildCommand = struct { } }; -fn exitOrWatch(code: u8, watch: bool) void { +fn exitOrWatch(code: u8, watch: bool) noreturn { if (watch) { // the watcher thread will exit the process std.time.sleep(std.math.maxInt(u64) - 1); diff --git a/src/codegen/kit-codegen.ts b/src/codegen/kit-codegen.ts index d3bb39f2da29d..2c122c61982fc 100644 --- a/src/codegen/kit-codegen.ts +++ b/src/codegen/kit-codegen.ts @@ -41,10 +41,14 @@ const results = await Promise.allSettled( // @ts-ignore let code = await result.outputs[0].text(); - // A second pass is used to convert global variables into parameters, while - // allowing for renaming to properly function when minification is enabled. - const in_names = ["input_graph", "config", mode === "server" && "server_fetch_function"].filter(Boolean); - const combined_source = ` + // A second pass is used to convert global variables into parameters, while + // allowing for renaming to properly function when minification is enabled. 
+ const in_names = [ + 'input_graph', + 'config', + mode === 'server' && 'server_exports' + ].filter(Boolean); + const combined_source = ` __marker__; let ${in_names.join(",")}; __marker__(${in_names.join(",")}); diff --git a/src/crash_handler.zig b/src/crash_handler.zig index f7d77d3d6e75f..39dc6d68c9b15 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -137,10 +137,10 @@ pub const Action = union(enum) { \\ part range: {d}..{d} , .{ - data.linkerContext().graph.bundler_graph.input_files + data.linkerContext().parse_graph.input_files .items(.source)[data.chunk.entry_point.source_index] .path.text, - data.linkerContext().graph.bundler_graph.input_files + data.linkerContext().parse_graph.input_files .items(.source)[data.part_range.source_index.get()] .path.text, data.part_range.part_index_begin, diff --git a/src/css/writer.zig b/src/css/writer.zig deleted file mode 100644 index c557d9982e3b7..0000000000000 --- a/src/css/writer.zig +++ /dev/null @@ -1,68 +0,0 @@ -const std = @import("std"); -const Allocator = std.mem.Allocator; -const bun = @import("root").bun; -const logger = bun.logger; -const Log = logger.Log; - -pub const css = @import("./css_parser.zig"); - -/// Wrapper around std.io.GenericWriter -pub fn GenericWriter( - comptime Context: type, - comptime WriteError: type, - comptime writeFn: fn (context: Context, bytes: []const u8) WriteError!usize, -) type { - return struct { - context: Context, - - const Self = @This(); - pub const Error = css.PrintErr; - - pub inline fn write(self: Self, bytes: []const u8) Error!usize { - return writeFn(self.context, bytes) catch ; - } - - pub inline fn writeAll(self: Self, bytes: []const u8) Error!void { - return @errorCast(self.any().writeAll(bytes)); - } - - pub inline fn print(self: Self, comptime format: []const u8, args: anytype) Error!void { - return @errorCast(self.any().print(format, args)); - } - - pub inline fn writeByte(self: Self, byte: u8) Error!void { - return @errorCast(self.any().writeByte(byte)); - } - - pub inline fn writeByteNTimes(self: Self, byte: u8, n: usize) Error!void { - return @errorCast(self.any().writeByteNTimes(byte, n)); - } - - pub inline fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) Error!void { - return @errorCast(self.any().writeBytesNTimes(bytes, n)); - } - - pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) Error!void { - return @errorCast(self.any().writeInt(T, value, endian)); - } - - pub inline fn writeStruct(self: Self, value: anytype) Error!void { - return @errorCast(self.any().writeStruct(value)); - } - - pub inline fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) Error!void { - return @errorCast(self.any().writeStructEndian(value, endian)); - } - - pub inline fn any(self: *const Self) AnyWriter { - return .{ - .context = @ptrCast(&self.context), - .writeFn = typeErasedWriteFn, - }; - } - - fn typeErasedWriteFn(context: *const anyopaque, bytes: []const u8) anyerror!usize { - const ptr: *const Context = @alignCast(@ptrCast(context)); - return writeFn(ptr.*, bytes); - } - }; diff --git a/src/darwin_c.zig b/src/darwin_c.zig index cfea6b1f38671..6771f06ad8bce 100644 --- a/src/darwin_c.zig +++ b/src/darwin_c.zig @@ -826,9 +826,9 @@ pub const sockaddr_dl = extern struct { sdl_slen: u8, // link layer selector length */ sdl_data: [12]u8, // minimum work area, can be larger; contains both if name and ll address */ //#ifndef __APPLE__ - // /* For TokenRing */ - // u_short sdl_rcf; /* source routing control */ - // 
u_short sdl_route[16]; /* source routing information */ + // /* For TokenRing */ + // u_short sdl_rcf; /* source routing control */ + // u_short sdl_route[16]; /* source routing information */ //#endif }; diff --git a/src/defines.zig b/src/defines.zig index 43cdd144b6cce..504585e39e67c 100644 --- a/src/defines.zig +++ b/src/defines.zig @@ -236,7 +236,7 @@ pub const Define = struct { } } - pub fn init(allocator: std.mem.Allocator, _user_defines: ?UserDefines, string_defines: ?UserDefinesArray) std.mem.Allocator.Error!*@This() { + pub fn init(allocator: std.mem.Allocator, _user_defines: ?UserDefines, string_defines: ?UserDefinesArray) bun.OOM!*@This() { var define = try allocator.create(Define); define.allocator = allocator; define.identifiers = bun.StringHashMap(IdentifierDefine).init(allocator); diff --git a/src/feature_flags.zig b/src/feature_flags.zig index 9930474c10d3f..c02c0ac35f824 100644 --- a/src/feature_flags.zig +++ b/src/feature_flags.zig @@ -158,7 +158,11 @@ pub fn isLibdeflateEnabled() bool { return !bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_NO_LIBDEFLATE"); } -/// Enable experimental bundler tools, codenamed "bun kit" +/// Enable Bun Kit's experimental bundler tools pub const kit = env.is_canary or env.isDebug; +/// Enable --server-components +pub const cli_server_components = kit; + +/// Enable CSS handling in `bun build` pub const css = env.is_canary or env.isDebug; diff --git a/src/fs.zig b/src/fs.zig index 20c9317d6f526..d0628f5019be3 100644 --- a/src/fs.zig +++ b/src/fs.zig @@ -1403,7 +1403,7 @@ pub const FileSystem = struct { return cache; } - // // Stores the file entries for directories we've listed before + // // Stores the file entries for directories we've listed before // entries_mutex: std.Mutex // entries map[string]entriesOrErr @@ -1627,7 +1627,9 @@ threadlocal var join_buf: [1024]u8 = undefined; pub const Path = struct { pretty: string, text: string, + // TODO(@paperdave): remove the default of this field. 
namespace: string = "unspecified", + // TODO(@paperdave): investigate removing or simplifying this property name: PathName, is_disabled: bool = false, is_symlink: bool = false, @@ -1855,9 +1857,9 @@ pub const Path = struct { }; } - pub fn initWithNamespaceVirtual(comptime text: string, comptime namespace: string, comptime package: string) Path { - return Path{ - .pretty = comptime "node:" ++ package, + pub inline fn initWithNamespaceVirtual(comptime text: string, comptime namespace: string, comptime package: string) Path { + return comptime Path{ + .pretty = namespace ++ ":" ++ package, .is_symlink = true, .text = text, .namespace = namespace, @@ -1865,6 +1867,16 @@ pub const Path = struct { }; } + pub inline fn initWithNamespaceComptime(comptime namespace: string, comptime package: string) Path { + return comptime Path{ + .pretty = namespace ++ ":" ++ package, + .is_symlink = true, + .text = package, + .namespace = namespace, + .name = PathName.init(package), + }; + } + pub fn isBefore(a: *Path, b: Path) bool { return a.namespace > b.namespace || (a.namespace == b.namespace and (a.text < b.text || diff --git a/src/import_record.zig b/src/import_record.zig index c21e2223c8f18..7e5ceba4a191c 100644 --- a/src/import_record.zig +++ b/src/import_record.zig @@ -7,27 +7,20 @@ const Index = @import("ast/base.zig").Index; const Api = @import("./api/schema.zig").Api; pub const ImportKind = enum(u8) { - // An entry point provided by the user + /// An entry point provided by the user entry_point, - - // An ES6 import or re-export statement + /// An ES6 import or re-export statement stmt, - - // A call to "require()" + /// A call to "require()" require, - - // An "import()" expression with a string argument + /// An "import()" expression with a string argument dynamic, - /// A call to "require.resolve()" require_resolve, - /// A CSS "@import" rule at, - /// A CSS "@import" rule with import conditions at_conditional, - /// A CSS "url(...)" token url, @@ -103,9 +96,8 @@ pub const ImportKind = enum(u8) { pub const ImportRecord = struct { range: logger.Range, path: fs.Path, - - /// 0 is invalid - module_id: u32 = 0, + kind: ImportKind, + tag: Tag = .none, source_index: Index = Index.invalid, @@ -147,9 +139,6 @@ pub const ImportRecord = struct { /// calling the "__reExport()" helper function calls_runtime_re_export_fn: bool = false, - /// Tell the printer to use runtime code to resolve this import/export - do_commonjs_transform_in_printer: bool = false, - /// True for require calls like this: "try { require() } catch {}". In this /// case we shouldn't generate an error if the path could not be resolved. 
is_inside_try_body: bool = false, @@ -165,10 +154,6 @@ /// If true, this import can be removed if it's unused is_external_without_side_effects: bool = false, - kind: ImportKind, - - tag: Tag = Tag.none, - /// Tell the printer to print the record as "foo:my-path" instead of "path" /// where "foo" is the namespace /// @@ -185,32 +170,31 @@ } pub const Tag = enum { + /// A normal import to a user's source file none, - /// JSX auto-import for React Fast Refresh - react_refresh, - /// JSX auto-import for jsxDEV or jsx - jsx_import, - /// JSX auto-import for Fragment or createElement - jsx_classic, - /// Uses the `bun` import specifier - /// import {foo} from "bun"; + /// An import to 'bun' bun, - /// Uses the `bun:test` import specifier - /// import {expect} from "bun:test"; + /// An import to 'bun:test' bun_test, + /// A builtin module, such as `node:fs` or `bun:sqlite` + builtin, + /// An import to the internal runtime runtime, - hardcoded, - /// A macro: import specifier OR a macro import + /// A 'macro:' import namespace or 'with { type: "macro" }' macro, - internal, - /// Referenced "use client"; at the start of the file + // TODO: evaluate if the following two can be deleted + /// The imported file has "use client" at the start. This is + /// a boundary from server -> client side. react_client_component, - - /// A file starting with "use client"; imported a server entry point - /// We don't actually support this right now. + /// The imported file has "use server" at the start. This is + /// a boundary from client -> server side. react_server_component, + /// For Bun Kit, if a module in the server graph should actually + /// cross over to the SSR graph. See kit.Framework.ServerComponents.separate_ssr_graph + kit_resolve_to_ssr_graph, + with_type_sqlite, with_type_sqlite_embedded, with_type_text, @@ -239,14 +223,18 @@ pub fn isSQLite(this: Tag) bool { return switch (this) { - .with_type_sqlite, .with_type_sqlite_embedded => true, + .with_type_sqlite, + .with_type_sqlite_embedded, + => true, else => false, }; } pub fn isReactReference(this: Tag) bool { return switch (this) { - .react_client_component, .react_server_component => true, + .react_client_component, + .react_server_component, + => true, else => false, }; } @@ -261,8 +249,8 @@ pub fn useDirective(this: Tag) bun.JSAst.UseDirective { return switch (this) { - .react_client_component => .@"use client", - .react_server_component => .@"use server", + .react_client_component => .client, + .react_server_component => .server, else => .none, }; } diff --git a/src/js_ast.zig b/src/js_ast.zig index bbd318cbe6093..6389df833ad48 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -12,8 +12,8 @@ const MutableString = bun.MutableString; const stringZ = bun.stringZ; const default_allocator = bun.default_allocator; const C = bun.C; -const Ref = @import("ast/base.zig").Ref; -const Index = @import("ast/base.zig").Index; +pub const Ref = @import("ast/base.zig").Ref; +pub const Index = @import("ast/base.zig").Index; const RefHashCtx = @import("ast/base.zig").RefHashCtx; const ObjectPool = @import("./pool.zig").ObjectPool; const ImportRecord = @import("import_record.zig").ImportRecord; @@ -552,7 +552,7 @@ pub const B = union(Binding.Tag) { }; pub const ClauseItem = struct { - alias: string = "", + alias: string, alias_loc: logger.Loc = logger.Loc.Empty, name: LocRef, @@ -838,16 +838,15 @@ pub const G = struct { }; pub
const Property = struct { - - // This is used when parsing a pattern that uses default values: - // - // [a = 1] = []; - // ({a = 1} = {}); - // - // It's also used for class fields: - // - // class Foo { a = 1 } - // + /// This is used when parsing a pattern that uses default values: + /// + /// [a = 1] = []; + /// ({a = 1} = {}); + /// + /// It's also used for class fields: + /// + /// class Foo { a = 1 } + /// initializer: ?ExprNodeIndex = null, kind: Kind = .normal, flags: Flags.Property.Set = Flags.Property.None, @@ -1153,55 +1152,53 @@ pub const Symbol = struct { } pub const Kind = enum { - - // An unbound symbol is one that isn't declared in the file it's referenced - // in. For example, using "window" without declaring it will be unbound. + /// An unbound symbol is one that isn't declared in the file it's referenced + /// in. For example, using "window" without declaring it will be unbound. unbound, - // This has special merging behavior. You're allowed to re-declare these - // symbols more than once in the same scope. These symbols are also hoisted - // out of the scope they are declared in to the closest containing function - // or module scope. These are the symbols with this kind: - // - // - Function arguments - // - Function statements - // - Variables declared using "var" - // + /// This has special merging behavior. You're allowed to re-declare these + /// symbols more than once in the same scope. These symbols are also hoisted + /// out of the scope they are declared in to the closest containing function + /// or module scope. These are the symbols with this kind: + /// + /// - Function arguments + /// - Function statements + /// - Variables declared using "var" hoisted, hoisted_function, - // There's a weird special case where catch variables declared using a simple - // identifier (i.e. not a binding pattern) block hoisted variables instead of - // becoming an error: - // - // var e = 0; - // try { throw 1 } catch (e) { - // print(e) // 1 - // var e = 2 - // print(e) // 2 - // } - // print(e) // 0 (since the hoisting stops at the catch block boundary) - // - // However, other forms are still a syntax error: - // - // try {} catch (e) { let e } - // try {} catch ({e}) { var e } - // - // This symbol is for handling this weird special case. + /// There's a weird special case where catch variables declared using a simple + /// identifier (i.e. not a binding pattern) block hoisted variables instead of + /// becoming an error: + /// + /// var e = 0; + /// try { throw 1 } catch (e) { + /// print(e) // 1 + /// var e = 2 + /// print(e) // 2 + /// } + /// print(e) // 0 (since the hoisting stops at the catch block boundary) + /// + /// However, other forms are still a syntax error: + /// + /// try {} catch (e) { let e } + /// try {} catch ({e}) { var e } + /// + /// This symbol is for handling this weird special case. catch_identifier, - // Generator and async functions are not hoisted, but still have special - // properties such as being able to overwrite previous functions with the - // same name + /// Generator and async functions are not hoisted, but still have special + /// properties such as being able to overwrite previous functions with the + /// same name generator_or_async_function, - // This is the special "arguments" variable inside functions + /// This is the special "arguments" variable inside functions arguments, - // Classes can merge with TypeScript namespaces. + /// Classes can merge with TypeScript namespaces. class, - // A class-private identifier (i.e. 
"#foo"). + /// A class-private identifier (i.e. "#foo"). private_field, private_method, private_get, @@ -1213,25 +1210,26 @@ pub const Symbol = struct { private_static_set, private_static_get_set_pair, - // Labels are in their own namespace + /// Labels are in their own namespace label, - // TypeScript enums can merge with TypeScript namespaces and other TypeScript - // enums. + /// TypeScript enums can merge with TypeScript namespaces and other TypeScript + /// enums. ts_enum, - // TypeScript namespaces can merge with classes, functions, TypeScript enums, - // and other TypeScript namespaces. + /// TypeScript namespaces can merge with classes, functions, TypeScript enums, + /// and other TypeScript namespaces. ts_namespace, - // In TypeScript, imports are allowed to silently collide with symbols within - // the module. Presumably this is because the imports may be type-only. + /// In TypeScript, imports are allowed to silently collide with symbols within + /// the module. Presumably this is because the imports may be type-only. + /// Import statement namespace references should NOT have this set. import, - // Assigning to a "const" symbol will throw a TypeError at runtime + /// Assigning to a "const" symbol will throw a TypeError at runtime constant, - // This annotates all other symbols that don't have special behavior. + /// This annotates all other symbols that don't have special behavior. other, pub fn jsonStringify(self: @This(), writer: anytype) !void { @@ -1294,7 +1292,7 @@ pub const Symbol = struct { // single inner array, so you can join the maps together by just make a // single outer array containing all of the inner arrays. See the comment on // "Ref" for more detail. - symbols_for_source: NestedList = NestedList{}, + symbols_for_source: NestedList = .{}, pub fn dump(this: Map) void { defer Output.flush(); @@ -1428,21 +1426,6 @@ pub const Symbol = struct { pub inline fn isHoisted(self: *const Symbol) bool { return Symbol.isKindHoisted(self.kind); } - - pub fn isReactComponentishName(symbol: *const Symbol) bool { - switch (symbol.kind) { - .hoisted, .hoisted_function, .constant, .class, .other => { - return switch (symbol.original_name[0]) { - 'A'...'Z' => true, - else => false, - }; - }, - - else => { - return false; - }, - } - } }; pub const OptionalChain = enum(u1) { @@ -3257,6 +3240,14 @@ pub const Stmt = struct { }; }; + pub fn StoredData(tag: Tag) type { + const T = std.meta.FieldType(Data, tag); + return switch (@typeInfo(T)) { + .Pointer => |ptr| ptr.child, + else => T, + }; + } + pub fn caresAboutScope(self: *Stmt) bool { return switch (self.data) { .s_block, .s_empty, .s_debugger, .s_expr, .s_if, .s_for, .s_for_in, .s_for_of, .s_do_while, .s_while, .s_with, .s_try, .s_switch, .s_return, .s_throw, .s_break, .s_continue, .s_directive => { @@ -6265,6 +6256,14 @@ pub const Expr = struct { return @as(Expr.Tag, self) == .e_string; } }; + + pub fn StoredData(tag: Tag) type { + const T = std.meta.FieldType(Data, tag); + return switch (@typeInfo(T)) { + .Pointer => |ptr| ptr.child, + else => T, + }; + } }; pub const EnumValue = struct { @@ -6437,7 +6436,7 @@ pub const S = struct { // when converting this module to a CommonJS module. 
namespace_ref: Ref, default_name: ?LocRef = null, - items: []ClauseItem = &([_]ClauseItem{}), + items: []ClauseItem = &.{}, star_name_loc: ?logger.Loc = null, import_record_index: u32, is_single_line: bool = false, @@ -6806,7 +6805,6 @@ pub const Ast = struct { runtime_import_record_id: ?u32 = null, needs_runtime: bool = false, - externals: []u32 = &[_]u32{}, // This is a list of CommonJS features. When a file uses CommonJS features, // it's not a candidate for "flat bundling" and must be wrapped in its own // closure. @@ -6831,7 +6829,6 @@ pub const Ast = struct { hashbang: string = "", directive: ?string = null, - url_for_css: ?string = null, parts: Part.List = Part.List{}, // This list may be mutated later, so we should store the capacity symbols: Symbol.List = Symbol.List{}, @@ -6847,11 +6844,11 @@ pub const Ast = struct { // These are used when bundling. They are filled in during the parser pass // since we already have to traverse the AST then anyway and the parser pass // is conveniently fully parallelized. - named_imports: NamedImports = NamedImports.init(bun.failing_allocator), - named_exports: NamedExports = NamedExports.init(bun.failing_allocator), + named_imports: NamedImports = .{}, + named_exports: NamedExports = .{}, export_star_import_records: []u32 = &([_]u32{}), - allocator: std.mem.Allocator, + // allocator: std.mem.Allocator, top_level_symbols_to_parts: TopLevelSymbolToParts = .{}, commonjs_named_exports: CommonJSNamedExports = .{}, @@ -6875,15 +6872,14 @@ pub const Ast = struct { }; pub const CommonJSNamedExports = bun.StringArrayHashMapUnmanaged(CommonJSNamedExport); - pub const NamedImports = std.ArrayHashMap(Ref, NamedImport, RefHashCtx, true); - pub const NamedExports = bun.StringArrayHashMap(NamedExport); + pub const NamedImports = std.ArrayHashMapUnmanaged(Ref, NamedImport, RefHashCtx, true); + pub const NamedExports = bun.StringArrayHashMapUnmanaged(NamedExport); pub const ConstValuesMap = std.ArrayHashMapUnmanaged(Ref, Expr, RefHashCtx, false); pub const TsEnumsMap = std.ArrayHashMapUnmanaged(Ref, bun.StringHashMapUnmanaged(InlinedEnumValue), RefHashCtx, false); pub fn fromParts(parts: []Part) Ast { return Ast{ .parts = Part.List.init(parts), - .allocator = bun.default_allocator, .runtime_imports = .{}, }; } @@ -6891,12 +6887,11 @@ pub const Ast = struct { pub fn initTest(parts: []Part) Ast { return Ast{ .parts = Part.List.init(parts), - .allocator = bun.default_allocator, .runtime_imports = .{}, }; } - pub const empty = Ast{ .parts = Part.List{}, .runtime_imports = .{}, .allocator = bun.default_allocator }; + pub const empty = Ast{ .parts = Part.List{}, .runtime_imports = .{} }; pub fn toJSON(self: *const Ast, _: std.mem.Allocator, stream: anytype) !void { const opts = std.json.StringifyOptions{ .whitespace = std.json.StringifyOptions.Whitespace{ @@ -6909,7 +6904,6 @@ pub const Ast = struct { pub fn deinit(this: *Ast) void { // TODO: assert mimalloc-owned memory if (this.parts.len > 0) this.parts.deinitWithAllocator(bun.default_allocator); - if (this.externals.len > 0) bun.default_allocator.free(this.externals); if (this.symbols.len > 0) this.symbols.deinitWithAllocator(bun.default_allocator); if (this.import_records.len > 0) this.import_records.deinitWithAllocator(bun.default_allocator); } @@ -6924,22 +6918,18 @@ pub const Ast = struct { /// So we make a slimmer version of Ast for bundling that doesn't allocate as much memory pub const BundledAst = struct { approximate_newline_count: u32 = 0, - nested_scope_slot_counts: SlotCounts = SlotCounts{}, - externals: 
[]u32 = &[_]u32{}, + nested_scope_slot_counts: SlotCounts = .{}, - exports_kind: ExportsKind = ExportsKind.none, + exports_kind: ExportsKind = .none, /// These are stored at the AST level instead of on individual AST nodes so /// they can be manipulated efficiently without a full AST traversal import_records: ImportRecord.List = .{}, hashbang: string = "", - directive: string = "", - url_for_css: string = "", - parts: Part.List = Part.List{}, - // This list may be mutated later, so we should store the capacity - symbols: Symbol.List = Symbol.List{}, - module_scope: Scope = Scope{}, + parts: Part.List = .{}, + symbols: Symbol.List = .{}, + module_scope: Scope = .{}, char_freq: CharFreq = undefined, exports_ref: Ref = Ref.None, module_ref: Ref = Ref.None, @@ -6949,18 +6939,19 @@ // These are used when bundling. They are filled in during the parser pass // since we already have to traverse the AST then anyway and the parser pass // is conveniently fully parallelized. - named_imports: NamedImports = NamedImports.init(bun.failing_allocator), - named_exports: NamedExports = NamedExports.init(bun.failing_allocator), - export_star_import_records: []u32 = &([_]u32{}), + named_imports: NamedImports = .{}, + named_exports: NamedExports = .{}, + export_star_import_records: []u32 = &.{}, - allocator: std.mem.Allocator, top_level_symbols_to_parts: TopLevelSymbolToParts = .{}, commonjs_named_exports: CommonJSNamedExports = .{}, redirect_import_record_index: u32 = std.math.maxInt(u32), - /// Only populated when bundling + /// Only populated when bundling. When --server-components is passed, this + /// is .browser for client components, and the server's target for files + /// in the server graph. target: bun.options.Target = .browser, // const_values: ConstValuesMap = .{}, @@ -6996,15 +6987,12 @@ return .{ .approximate_newline_count = this.approximate_newline_count, .nested_scope_slot_counts = this.nested_scope_slot_counts, - .externals = this.externals, .exports_kind = this.exports_kind, .import_records = this.import_records, .hashbang = this.hashbang, - .directive = this.directive, - // .url_for_css = this.url_for_css, .parts = this.parts, // This list may be mutated later, so we should store the capacity .symbols = this.symbols, @@ -7022,7 +7010,6 @@ .named_exports = this.named_exports, .export_star_import_records = this.export_star_import_records, - .allocator = this.allocator, .top_level_symbols_to_parts = this.top_level_symbols_to_parts, .commonjs_named_exports = this.commonjs_named_exports, @@ -7048,14 +7035,12 @@ return .{ .approximate_newline_count = @as(u32, @truncate(ast.approximate_newline_count)), .nested_scope_slot_counts = ast.nested_scope_slot_counts, - .externals = ast.externals, .exports_kind = ast.exports_kind, .import_records = ast.import_records, .hashbang = ast.hashbang, - .directive = ast.directive orelse "", // .url_for_css = ast.url_for_css orelse "", .parts = ast.parts, // This list may be mutated later, so we should store the capacity @@ -7074,7 +7059,7 @@ .named_exports = ast.named_exports, .export_star_import_records = ast.export_star_import_records, - .allocator = ast.allocator, + // .allocator = ast.allocator, .top_level_symbols_to_parts = ast.top_level_symbols_to_parts, .commonjs_named_exports = ast.commonjs_named_exports, @@ -7110,27 +7095,27 @@ pub const Span = struct { /// block are merged into a single namespace
while the non-exported code is /// still scoped to just within that block: /// -/// let x = 1; -/// namespace Foo { -/// let x = 2; -/// export let y = 3; -/// } -/// namespace Foo { -/// console.log(x); // 1 -/// console.log(y); // 3 -/// } +/// let x = 1; +/// namespace Foo { +/// let x = 2; +/// export let y = 3; +/// } +/// namespace Foo { +/// console.log(x); // 1 +/// console.log(y); // 3 +/// } /// /// Doing this also works inside an enum: /// -/// enum Foo { -/// A = 3, -/// B = A + 1, -/// } -/// enum Foo { -/// C = A + 2, -/// } -/// console.log(Foo.B) // 4 -/// console.log(Foo.C) // 5 +/// enum Foo { +/// A = 3, +/// B = A + 1, +/// } +/// enum Foo { +/// C = A + 2, +/// } +/// console.log(Foo.B) // 4 +/// console.log(Foo.C) // 5 /// /// This is a form of identifier lookup that works differently than the /// hierarchical scope-based identifier lookup in JavaScript. Lookup now needs @@ -8468,14 +8453,22 @@ pub const ASTMemoryAllocator = struct { } }; -pub const UseDirective = enum { +pub const UseDirective = enum(u2) { + // TODO: Remove this, and provide `UseDirective.Optional` instead none, - @"use client", - @"use server", + /// "use client" + client, + /// "use server" + server, + + pub const Boundering = enum(u2) { + client = @intFromEnum(UseDirective.client), + server = @intFromEnum(UseDirective.server), + }; pub const Flags = struct { - is_client: bool = false, - is_server: bool = false, + has_any_client: bool = false, + has_any_server: bool = false, }; pub fn isBoundary(this: UseDirective, other: UseDirective) bool { @@ -8485,22 +8478,13 @@ pub const UseDirective = enum { return true; } - pub fn boundering(this: UseDirective, other: UseDirective) ?UseDirective { + pub fn boundering(this: UseDirective, other: UseDirective) ?Boundering { if (this == other or other == .none) return null; - - return other; + return @enumFromInt(@intFromEnum(other)); } - pub const EntryPoint = struct { - source_index: Index.Int, - use_directive: UseDirective, - }; - - pub const List = std.MultiArrayList(UseDirective.EntryPoint); - - // TODO: remove this, add an onModuleDirective() callback to the parser - pub fn parse(contents: []const u8) UseDirective { + pub fn parse(contents: []const u8) ?UseDirective { const truncated = std.mem.trimLeft(u8, contents, " \t\n\r;"); if (truncated.len < "'use client';".len) @@ -8515,30 +8499,124 @@ pub const UseDirective = enum { const unquoted = directive_string[1 .. directive_string.len - 2]; - if (strings.eqlComptime( - unquoted, - "use client", - )) { - return .@"use client"; + if (strings.eqlComptime(unquoted, "use client")) { + return .client; } - if (strings.eqlComptime( - unquoted, - "use server", - )) { - return .@"use server"; + if (strings.eqlComptime(unquoted, "use server")) { + return .server; } - return .none; + return null; } +}; - pub fn target(this: UseDirective, default: bun.options.Target) bun.options.Target { - return switch (this) { - .none => default, - .@"use client" => .browser, - .@"use server" => .bun, +/// Represents a boundary between client and server code. Every boundary +/// gets bundled twice, once for the desired target, and once to generate +/// a module of "references". Specifically, the generated file takes the +/// canonical Ast as input to derive a wrapper. See `Framework.ServerComponents` +/// for more details about this generated file. +/// +/// This is sometimes abbreviated as SCB +pub const ServerComponentBoundary = struct { + use_directive: UseDirective, + + /// The index of the original file. 
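A note on the `UseDirective.parse` rewrite above: it now returns `?UseDirective`, reserving `null` for "no directive found" instead of overloading `.none`. A rough sketch of the detection it performs (trim leading trivia, then match a quoted directive); `Directive`, `parseDirective`, and `quotedEql` are hypothetical stand-ins, and Bun's real version uses its own `strings` helpers:

```zig
const std = @import("std");

const Directive = enum { client, server };

/// Trim leading whitespace and semicolons, then match a quoted
/// "use client" / "use server" prefix. `null` means no directive.
fn parseDirective(contents: []const u8) ?Directive {
    const trimmed = std.mem.trimLeft(u8, contents, " \t\r\n;");
    if (trimmed.len < "'use client'".len) return null;
    const quote = trimmed[0];
    if (quote != '\'' and quote != '"') return null;
    const body = trimmed[1..];
    if (quotedEql(body, "use client", quote)) return .client;
    if (quotedEql(body, "use server", quote)) return .server;
    return null;
}

fn quotedEql(body: []const u8, name: []const u8, quote: u8) bool {
    return body.len > name.len and
        std.mem.startsWith(u8, body, name) and
        body[name.len] == quote;
}

test parseDirective {
    try std.testing.expectEqual(@as(?Directive, .client), parseDirective("  'use client';\nexport {};"));
    try std.testing.expectEqual(@as(?Directive, null), parseDirective("console.log(1);"));
}
```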
+ source_index: Index.Int, + + /// Index to the file imported on the opposite platform, which is + /// generated by the bundler. For client components, this is the + /// server's code. For server actions, this is the client's code. + reference_source_index: Index.Int, + + /// When `kit.Framework.ServerComponents.separate_ssr_graph` is enabled this + /// points to the separated module. When the SSR graph is not separate, this is + /// equal to `reference_source_index`. + // + // TODO: Is this used for server actions? + ssr_source_index: Index.Int, + + /// The requirements for this data structure are reasonable lookup + /// speed and the ability to pull a `[]const Index.Int` of all + /// boundaries for iteration. + pub const List = struct { + list: std.MultiArrayList(ServerComponentBoundary) = .{}, + /// Used to facilitate fast lookups into `list` by `.source_index` + map: Map = .{}, + + const Map = std.ArrayHashMapUnmanaged(void, void, struct {}, true); + + /// Can only be called on the bundler thread. + pub fn put( + m: *List, + allocator: std.mem.Allocator, + source_index: Index.Int, + use_directive: UseDirective, + reference_source_index: Index.Int, + ssr_source_index: Index.Int, + ) !void { + try m.list.append(allocator, .{ + .source_index = source_index, + .use_directive = use_directive, + .reference_source_index = reference_source_index, + .ssr_source_index = ssr_source_index, + }); + const gop = try m.map.getOrPutAdapted( + allocator, + source_index, + Adapter{ .list = m.list.slice() }, + ); + bun.assert(!gop.found_existing); + } + + /// Can only be called on the bundler thread. + pub fn getIndex(l: *const List, real_source_index: Index.Int) ?usize { + return l.map.getIndexAdapted( + real_source_index, + Adapter{ .list = l.list.slice() }, + ); + } + + /// Use this to improve speed of accessing fields at the cost of + /// storing more pointers. Invalidated when input is mutated.
+ pub fn slice(l: List) Slice { + return .{ .list = l.list.slice(), .map = l.map }; + } + + pub const Slice = struct { + list: std.MultiArrayList(ServerComponentBoundary).Slice, + map: Map, + + pub fn getIndex(l: *const Slice, real_source_index: Index.Int) ?usize { + return l.map.getIndexAdapted( + real_source_index, + Adapter{ .list = l.list }, + ) orelse return null; + } + + pub fn getReferenceSourceIndex(l: *const Slice, real_source_index: Index.Int) ?u32 { + const i = l.map.getIndexAdapted( + real_source_index, + Adapter{ .list = l.list }, + ) orelse return null; + bun.unsafeAssert(l.list.capacity > 0); // optimize MultiArrayList.Slice.items + return l.list.items(.reference_source_index)[i]; + } }; - } + + pub const Adapter = struct { + list: std.MultiArrayList(ServerComponentBoundary).Slice, + + pub fn hash(_: Adapter, key: Index.Int) u32 { + return std.hash.uint32(key); + } + + pub fn eql(adapt: Adapter, a: Index.Int, _: void, b_index: usize) bool { + bun.unsafeAssert(adapt.list.capacity > 0); // optimize MultiArrayList.Slice.items + return a == adapt.list.items(.source_index)[b_index]; + } + }; + }; }; pub const GlobalStoreHandle = struct { diff --git a/src/js_parser.zig b/src/js_parser.zig index 6b03e651aa669..047b6bfa33373 100644 --- a/src/js_parser.zig +++ b/src/js_parser.zig @@ -1061,7 +1061,7 @@ pub const ImportScanner = struct { stmts: []Stmt, will_transform_to_common_js: bool, comptime hot_module_reloading_transformations: bool, - hot_module_reloading_context: if (hot_module_reloading_transformations) *P.ConvertESMExportsForHmr else void, + hot_module_reloading_context: if (hot_module_reloading_transformations) *ConvertESMExportsForHmr else void, ) !ImportScanner { var scanner = ImportScanner{}; var stmts_end: usize = 0; @@ -1077,7 +1077,7 @@ pub const ImportScanner = struct { st__.* = st; } - var record: *ImportRecord = &p.import_records.items[st.import_record_index]; + const record: *ImportRecord = &p.import_records.items[st.import_record_index]; if (record.path.isMacro()) { record.is_unused = true; @@ -1272,7 +1272,7 @@ pub const ImportScanner = struct { result.* = alias; } strings.sortDesc(sorted); - p.named_imports.ensureUnusedCapacity(sorted.len) catch unreachable; + p.named_imports.ensureUnusedCapacity(p.allocator, sorted.len) catch bun.outOfMemory(); // Create named imports for these property accesses. This will // cause missing imports to generate useful warnings. 
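The `ServerComponentBoundary.List` introduced above pairs a `std.MultiArrayList` with a zero-sized `ArrayHashMapUnmanaged` that serves purely as a hash index over the `source_index` column, via an adapted context. A compilable sketch of that pattern against the same standard-library APIs; `Row` and `Table` are illustrative stand-ins, not Bun's types:

```zig
const std = @import("std");

const Row = struct { source_index: u32, payload: u32 };

const Table = struct {
    rows: std.MultiArrayList(Row) = .{},
    // Zero-sized keys/values: the map is only a hash index into `rows`.
    index: std.ArrayHashMapUnmanaged(void, void, struct {}, true) = .{},

    // The adapter hashes lookup keys and compares them against the
    // `source_index` column of the row stored at a given map slot.
    const Adapter = struct {
        rows: std.MultiArrayList(Row).Slice,
        pub fn hash(_: Adapter, key: u32) u32 {
            return std.hash.uint32(key);
        }
        pub fn eql(a: Adapter, key: u32, _: void, i: usize) bool {
            return key == a.rows.items(.source_index)[i];
        }
    };

    fn put(t: *Table, gpa: std.mem.Allocator, row: Row) !void {
        try t.rows.append(gpa, row);
        const gop = try t.index.getOrPutAdapted(gpa, row.source_index, Adapter{ .rows = t.rows.slice() });
        std.debug.assert(!gop.found_existing); // one row per source index
    }

    fn getIndex(t: *const Table, source_index: u32) ?usize {
        return t.index.getIndexAdapted(source_index, Adapter{ .rows = t.rows.slice() });
    }
};

test Table {
    var t = Table{};
    defer {
        t.rows.deinit(std.testing.allocator);
        t.index.deinit(std.testing.allocator);
    }
    try t.put(std.testing.allocator, .{ .source_index = 42, .payload = 1 });
    try std.testing.expectEqual(@as(?usize, 0), t.getIndex(42));
    try std.testing.expectEqual(@as(?usize, null), t.getIndex(7));
}
```

Because the hash table stores no keys of its own, iteration over all boundaries stays a plain column scan of the MultiArrayList, which is exactly the dual requirement the doc comment above describes.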
@@ -1283,6 +1283,7 @@ pub const ImportScanner = struct { for (sorted) |alias| { const item = existing_items.get(alias).?; p.named_imports.put( + p.allocator, item.ref.?, js_ast.NamedImport{ .alias = alias, @@ -1290,7 +1291,7 @@ pub const ImportScanner = struct { .namespace_ref = namespace_ref, .import_record_index = st.import_record_index, }, - ) catch unreachable; + ) catch bun.outOfMemory(); const name: LocRef = item; const name_ref = name.ref.?; @@ -1314,8 +1315,9 @@ pub const ImportScanner = struct { } p.named_imports.ensureUnusedCapacity( + p.allocator, st.items.len + @as(usize, @intFromBool(st.default_name != null)) + @as(usize, @intFromBool(st.star_name_loc != null)), - ) catch unreachable; + ) catch bun.outOfMemory(); if (st.star_name_loc) |loc| { p.named_imports.putAssumeCapacity( @@ -1370,7 +1372,7 @@ pub const ImportScanner = struct { const name: LocRef = item.name; const name_ref = name.ref.?; - try p.named_imports.put(name_ref, js_ast.NamedImport{ + try p.named_imports.put(p.allocator, name_ref, js_ast.NamedImport{ .alias = item.alias, .alias_loc = name.loc, .namespace_ref = namespace_ref, @@ -1486,9 +1488,9 @@ pub const ImportScanner = struct { // Rewrite this export to be: // exports.default = // But only if it's anonymous - if (!hot_module_reloading_transformations and will_transform_to_common_js) { + if (!hot_module_reloading_transformations and will_transform_to_common_js and P != bun.bundle_v2.AstBuilder) { const expr = st.value.toExpr(); - var export_default_args = p.allocator.alloc(Expr, 2) catch unreachable; + var export_default_args = try p.allocator.alloc(Expr, 2); export_default_args[0] = p.@"module.exports"(expr.loc); export_default_args[1] = expr; stmt = p.s(S.SExpr{ .value = p.callRuntime(expr.loc, "__exportDefault", export_default_args) }, expr.loc); @@ -1504,7 +1506,7 @@ pub const ImportScanner = struct { if (st.alias) |alias| { // "export * as ns from 'path'" - try p.named_imports.put(st.namespace_ref, js_ast.NamedImport{ + try p.named_imports.put(p.allocator, st.namespace_ref, js_ast.NamedImport{ .alias = null, .alias_is_star = true, .alias_loc = alias.loc, @@ -1522,13 +1524,13 @@ pub const ImportScanner = struct { }, .s_export_from => |st| { try p.import_records_for_current_part.append(allocator, st.import_record_index); - p.named_imports.ensureUnusedCapacity(st.items.len) catch unreachable; + p.named_imports.ensureUnusedCapacity(p.allocator, st.items.len) catch unreachable; for (st.items) |item| { const ref = item.name.ref orelse p.panic("Expected export from item to have a name {any}", .{st}); // Note that the imported alias is not item.Alias, which is the // exported alias. This is somewhat confusing because each // SExportFrom statement is basically SImport + SExportClause in one. 
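The repeated `put(p.allocator, ...)` and `ensureUnusedCapacity(p.allocator, ...)` changes in this hunk and the next follow from `NamedImports`/`NamedExports` switching to the unmanaged hash-map variants in the js_ast.zig hunk earlier: unmanaged containers do not store an allocator, so every growing call now takes one explicitly. A minimal illustration of the two APIs, assuming only the standard library:

```zig
const std = @import("std");

test "managed vs unmanaged array hash maps" {
    const gpa = std.testing.allocator;

    // Managed: the allocator is captured at init and omitted afterwards.
    var managed = std.StringArrayHashMap(u32).init(gpa);
    defer managed.deinit();
    try managed.put("a", 1);

    // Unmanaged: zero-sized init; the allocator is passed to every
    // mutating call, as in the put(p.allocator, ...) calls above.
    var unmanaged: std.StringArrayHashMapUnmanaged(u32) = .{};
    defer unmanaged.deinit(gpa);
    try unmanaged.put(gpa, "a", 1);

    try std.testing.expectEqual(@as(?u32, 1), unmanaged.get("a"));
}
```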
- try p.named_imports.put(ref, js_ast.NamedImport{ + try p.named_imports.put(p.allocator, ref, js_ast.NamedImport{ .alias_is_star = false, .alias = item.original_name, .alias_loc = item.name.loc, @@ -2842,7 +2844,7 @@ pub const ScanPassResult = struct { pub fn init(allocator: Allocator) ScanPassResult { return .{ .import_records = ListManaged(ImportRecord).init(allocator), - .named_imports = js_ast.Ast.NamedImports.init(allocator), + .named_imports = .{}, .used_symbols = ParsePassSymbolUsageMap.init(allocator), .import_records_to_keep = ListManaged(u32).init(allocator), .approximate_newline_count = 0, @@ -3715,7 +3717,6 @@ pub const Parser = struct { part.symbol_uses = .{}; return js_ast.Result{ .ast = js_ast.Ast{ - .allocator = p.allocator, .import_records = ImportRecord.List.init(p.import_records.items), .redirect_import_record_index = id, .named_imports = p.named_imports, @@ -4421,7 +4422,7 @@ const ParserFeatures = struct { scan_only: bool = false, }; -const ImportItemForNamespaceMap = bun.StringArrayHashMap(LocRef); +pub const ImportItemForNamespaceMap = bun.StringArrayHashMap(LocRef); pub const KnownGlobal = enum { WeakSet, @@ -5797,7 +5798,7 @@ fn NewParser_( } } - pub fn recordExport(p: *P, loc: logger.Loc, alias: string, ref: Ref) anyerror!void { + pub fn recordExport(p: *P, loc: logger.Loc, alias: string, ref: Ref) !void { if (p.named_exports.get(alias)) |name| { // Duplicate exports are an error var notes = try p.allocator.alloc(logger.Data, 1); @@ -5814,7 +5815,7 @@ fn NewParser_( .{std.mem.trim(u8, alias, "\"'")}, ); } else if (!p.isDeoptimizedCommonJS()) { - try p.named_exports.put(alias, js_ast.NamedExport{ .alias_loc = loc, .ref = ref }); + try p.named_exports.put(p.allocator, alias, js_ast.NamedExport{ .alias_loc = loc, .ref = ref }); } } @@ -5865,7 +5866,6 @@ fn NewParser_( }, .e_private_identifier => |private| { return p.loadNameFromRef(private.ref); - // return p.loadNameFromRef() }, else => { return "property"; @@ -6052,7 +6052,7 @@ fn NewParser_( }; declared_symbols.appendAssumeCapacity(.{ .ref = ref, .is_top_level = true }); try p.is_import_item.put(allocator, ref, {}); - try p.named_imports.put(ref, js_ast.NamedImport{ + try p.named_imports.put(allocator, ref, js_ast.NamedImport{ .alias = alias_name, .alias_loc = logger.Loc{}, .namespace_ref = namespace_ref, @@ -6147,7 +6147,7 @@ fn NewParser_( declared_symbols.appendAssumeCapacity(.{ .ref = entry.ref, .is_top_level = true }); try p.module_scope.generated.push(allocator, entry.ref); try p.is_import_item.put(allocator, entry.ref, {}); - try p.named_imports.put(entry.ref, .{ + try p.named_imports.put(allocator, entry.ref, .{ .alias = entry.name, .alias_loc = logger.Loc.Empty, .namespace_ref = namespace_ref, @@ -9200,6 +9200,8 @@ fn NewParser_( } } } + } else if (import_tag == .kit_resolve_to_ssr_graph) { + p.import_records.items[stmt.import_record_index].tag = import_tag; } } @@ -10206,7 +10208,7 @@ fn NewParser_( isForAwait = false; } else { // TODO: improve error handling here - // didGenerateError := p.markSyntaxFeature(compat.ForAwait, awaitRange) + // didGenerateError := p.markSyntaxFeature(compat.ForAwait, awaitRange) if (p.fn_or_arrow_data_parse.is_top_level) { p.top_level_await_keyword = await_range; // p.markSyntaxFeature(compat.TopLevelAwait, awaitRange) @@ -12209,6 +12211,7 @@ fn NewParser_( const SupportedAttribute = enum { type, embed, + bunKitGraph, }; var has_seen_embed_true = false; @@ -12217,21 +12220,17 @@ fn NewParser_( const supported_attribute: ?SupportedAttribute = brk: { // Parse the key if 
(p.lexer.isIdentifierOrKeyword()) { - if (strings.eqlComptime(p.lexer.identifier, "type")) { - break :brk .type; - } - - if (strings.eqlComptime(p.lexer.identifier, "embed")) { - break :brk .embed; + inline for (comptime std.enums.values(SupportedAttribute)) |t| { + if (strings.eqlComptime(p.lexer.identifier, @tagName(t))) { + break :brk t; + } } } else if (p.lexer.token == .t_string_literal) { if (p.lexer.string_literal_is_ascii) { - if (strings.eqlComptime(p.lexer.string_literal_slice, "type")) { - break :brk .type; - } - - if (strings.eqlComptime(p.lexer.string_literal_slice, "embed")) { - break :brk .embed; + inline for (comptime std.enums.values(SupportedAttribute)) |t| { + if (strings.eqlComptime(p.lexer.string_literal_slice, @tagName(t))) { + break :brk t; + } } } } else { @@ -12275,6 +12274,13 @@ fn NewParser_( } } }, + .bunKitGraph => { + if (strings.eqlComptime(p.lexer.string_literal_slice, "ssr")) { + path.import_tag = .kit_resolve_to_ssr_graph; + } else { + try p.lexer.addRangeError(p.lexer.range(), "'bunKitGraph' can only be set to 'ssr'", .{}, true); + } + }, } } } @@ -16225,10 +16231,10 @@ fn NewParser_( return exp; } - // // Capture "this" inside arrow functions that will be lowered into normal + // // Capture "this" inside arrow functions that will be lowered into normal // // function expressions for older language environments // if p.fnOrArrowDataVisit.isArrow && p.options.unsupportedJSFeatures.Has(compat.Arrow) && p.fnOnlyDataVisit.isThisNested { - // return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: p.captureThis()}}, exprOut{} + // return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: p.captureThis()}}, exprOut{} // } }, .e_import_meta => { @@ -19931,7 +19937,7 @@ fn NewParser_( data.cases[i].value = p.visitExpr(val); // TODO: error messages // Check("case", *c.Value, c.Value.Loc) - // p.warnAboutTypeofAndString(s.Test, *c.Value) + // p.warnAboutTypeofAndString(s.Test, *c.Value) } var _stmts = ListManaged(Stmt).fromOwnedSlice(p.allocator, case.body); p.visitStmts(&_stmts, StmtsKind.none) catch unreachable; @@ -23488,15 +23494,14 @@ fn NewParser_( parts_list.cap = @intCast(input_parts.len); return .{ - .allocator = p.allocator, .runtime_imports = p.runtime_imports, .parts = parts_list, .module_scope = p.module_scope.*, - .symbols = js_ast.Symbol.List.init(p.symbols.items), + .symbols = js_ast.Symbol.List.fromList(p.symbols), .exports_ref = p.exports_ref, .wrapper_ref = wrapper_ref, .module_ref = p.module_ref, - .import_records = ImportRecord.List.init(p.import_records.items), + .import_records = ImportRecord.List.fromList(p.import_records), .export_star_import_records = p.export_star_import_records.items, .approximate_newline_count = p.lexer.approximate_newline_count, .exports_kind = exports_kind, @@ -23588,317 +23593,6 @@ fn NewParser_( return false; } - const ConvertESMExportsForHmr = struct { - last_part: *js_ast.Part, - imports_seen: std.AutoArrayHashMapUnmanaged(u32, void) = .{}, - export_props: std.ArrayListUnmanaged(G.Property) = .{}, - stmts: std.ArrayListUnmanaged(Stmt) = .{}, - - fn convertStmt(ctx: *ConvertESMExportsForHmr, p: *P, stmt: Stmt) !void { - const new_stmt = switch (stmt.data) { - else => stmt, - .s_local => |st| stmt: { - if (!st.is_export) break :stmt stmt; - - st.is_export = false; - - if (st.kind.isReassignable()) { - for (st.decls.slice()) |decl| { - try ctx.visitBindingForKitModuleExports(p, decl.binding, true); - } - } else { - // TODO: remove this dupe - var dupe_decls = try 
std.ArrayListUnmanaged(G.Decl).initCapacity(p.allocator, st.decls.len); - - for (st.decls.slice()) |decl| { - bun.assert(decl.value != null); // const must be initialized - - switch (decl.binding.data) { - .b_missing => @panic("binding missing"), - - .b_identifier => |id| { - const symbol = p.symbols.items[id.ref.inner_index]; - - // if the symbol is not used, we don't need to preserve - // a binding in this scope. we can move it to the exports object. - if (symbol.use_count_estimate != 0 or !decl.value.?.canBeMoved()) { - dupe_decls.appendAssumeCapacity(decl); - } - - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ .data = symbol.original_name }, decl.binding.loc), - .value = decl.value, - }); - }, - - else => { - dupe_decls.appendAssumeCapacity(decl); - try ctx.visitBindingForKitModuleExports(p, decl.binding, false); - }, - } - } - - if (dupe_decls.items.len == 0) { - return; - } - - st.decls = G.Decl.List.fromList(dupe_decls); - } - - break :stmt stmt; - }, - .s_export_default => |st| stmt: { - // Simple case: we can move this to the default property of the exports object - if (st.canBeMoved()) { - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), - .value = st.value.toExpr(), - }); - // no statement emitted - return; - } - - // Otherwise, we need a temporary - const temp_id = p.generateTempRef("default_export"); - try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = temp_id, .is_top_level = true }); - try ctx.last_part.symbol_uses.putNoClobber(p.allocator, temp_id, .{ .count_estimate = 1 }); - try p.module_scope.generated.push(p.allocator, temp_id); - - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), - .value = Expr.initIdentifier(temp_id, stmt.loc), - }); - - break :stmt Stmt.alloc(S.Local, .{ - .kind = .k_const, - .decls = try G.Decl.List.fromSlice(p.allocator, &.{ - .{ - .binding = Binding.alloc(p.allocator, B.Identifier{ .ref = temp_id }, stmt.loc), - .value = st.value.toExpr(), - }, - }), - }, stmt.loc); - }, - .s_class => |st| stmt: { - // Strip the "export" keyword - if (!st.is_export) break :stmt stmt; - - // Export as CommonJS - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ - .data = p.symbols.items[st.class.class_name.?.ref.?.inner_index].original_name, - }, stmt.loc), - .value = Expr.initIdentifier(st.class.class_name.?.ref.?, stmt.loc), - }); - - st.is_export = false; - - break :stmt stmt; - }, - .s_function => |st| stmt: { - // Strip the "export" keyword - if (!st.func.flags.contains(.is_export)) break :stmt stmt; - - st.func.flags.remove(.is_export); - - // Export as CommonJS - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ - .data = p.symbols.items[st.func.name.?.ref.?.inner_index].original_name, - }, stmt.loc), - .value = Expr.initIdentifier(st.func.name.?.ref.?, stmt.loc), - }); - - break :stmt stmt; - }, - .s_export_clause => |st| { - for (st.items) |item| { - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ - .data = item.alias, - }, stmt.loc), - .value = Expr.initIdentifier(item.name.ref.?, item.name.loc), - }); - } - - return; // do not emit a statement here - }, - - .s_export_from => |st| { - _ = st; // autofix - @panic("TODO s_export_from"); - }, - .s_export_star => |st| { - _ = st; // autofix - @panic("TODO s_export_star"); - }, - - // De-duplicate import statements. 
It is okay to disregard - // named/default imports here as we always rewrite them as - // full qualified property accesses (need to so live-bindings) - .s_import => |st| stmt: { - const gop = try ctx.imports_seen.getOrPut(p.allocator, st.import_record_index); - if (gop.found_existing) return; - break :stmt stmt; - }, - }; - - try ctx.stmts.append(p.allocator, new_stmt); - } - - fn visitBindingForKitModuleExports( - ctx: *ConvertESMExportsForHmr, - p: *P, - binding: Binding, - is_live_binding: bool, - ) !void { - switch (binding.data) { - .b_missing => @panic("missing!"), - .b_identifier => |id| { - try ctx.visitRefForKitModuleExports(p, id.ref, binding.loc, is_live_binding); - }, - .b_array => |array| { - for (array.items) |item| { - try ctx.visitBindingForKitModuleExports(p, item.binding, is_live_binding); - } - }, - .b_object => |object| { - for (object.properties) |item| { - try ctx.visitBindingForKitModuleExports(p, item.value, is_live_binding); - } - }, - } - } - - fn visitRefForKitModuleExports( - ctx: *ConvertESMExportsForHmr, - p: *P, - ref: Ref, - loc: logger.Loc, - is_live_binding: bool, - ) !void { - const symbol = p.symbols.items[ref.inner_index]; - const id = Expr.initIdentifier(ref, loc); - if (is_live_binding) { - const key = Expr.init(E.String, .{ - .data = symbol.original_name, - }, loc); - - // This is technically incorrect in that we've marked this as a - // top level symbol. but all we care about is preventing name - // collisions, not necessarily the best minificaiton (dev only) - const arg1 = p.generateTempRef(symbol.original_name); - try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = arg1, .is_top_level = true }); - try ctx.last_part.symbol_uses.putNoClobber(p.allocator, arg1, .{ .count_estimate = 1 }); - try p.module_scope.generated.push(p.allocator, arg1); - - // Live bindings need to update the value internally and externally. 
- // 'get abc() { return abc }' - try ctx.export_props.append(p.allocator, .{ - .kind = .get, - .key = key, - .value = Expr.init(E.Function, .{ .func = .{ - .body = .{ - .stmts = try p.allocator.dupe(Stmt, &.{ - Stmt.alloc(S.Return, .{ .value = id }, loc), - }), - .loc = loc, - }, - } }, loc), - }); - // 'set abc(abc2) { abc = abc2 }' - try ctx.export_props.append(p.allocator, .{ - .kind = .set, - .key = key, - .value = Expr.init(E.Function, .{ .func = .{ - .args = try p.allocator.dupe(G.Arg, &.{.{ - .binding = Binding.alloc(p.allocator, B.Identifier{ .ref = arg1 }, loc), - }}), - .body = .{ - .stmts = try p.allocator.dupe(Stmt, &.{ - Stmt.alloc(S.SExpr, .{ - .value = Expr.assign(id, Expr.initIdentifier(arg1, loc)), - }, loc), - }), - .loc = loc, - }, - } }, loc), - }); - } else { - // 'abc,' - try ctx.export_props.append(p.allocator, .{ - .key = Expr.init(E.String, .{ - .data = symbol.original_name, - }, loc), - .value = id, - }); - } - } - - pub fn finalize(ctx: *ConvertESMExportsForHmr, p: *P, all_parts: []js_ast.Part) ![]js_ast.Part { - if (ctx.export_props.items.len > 0) { - // add a marker for the client runtime to tell that this is an ES module - try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{ - .value = Expr.assign( - Expr.init(E.Dot, .{ - .target = Expr.initIdentifier(p.module_ref, logger.Loc.Empty), - .name = "__esModule", - .name_loc = logger.Loc.Empty, - }, logger.Loc.Empty), - Expr.init(E.Boolean, .{ .value = true }, logger.Loc.Empty), - ), - }, logger.Loc.Empty)); - - try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{ - .value = Expr.assign( - Expr.init(E.Dot, .{ - .target = Expr.initIdentifier(p.module_ref, logger.Loc.Empty), - .name = "exports", - .name_loc = logger.Loc.Empty, - }, logger.Loc.Empty), - Expr.init(E.Object, .{ - .properties = G.Property.List.fromList(ctx.export_props), - }, logger.Loc.Empty), - ), - }, logger.Loc.Empty)); - - // mark a dependency on module_ref so it is renamed - try ctx.last_part.symbol_uses.put(p.allocator, p.module_ref, .{ .count_estimate = 1 }); - try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = p.module_ref, .is_top_level = true }); - } - - // TODO: this is a tiny mess. it is honestly trying to hard to merge all parts into one - for (all_parts[0 .. 
all_parts.len - 1]) |*part| { - try ctx.last_part.declared_symbols.appendList(p.allocator, part.declared_symbols); - try ctx.last_part.import_record_indices.append(p.allocator, part.import_record_indices.slice()); - for (part.symbol_uses.keys(), part.symbol_uses.values()) |k, v| { - const gop = try ctx.last_part.symbol_uses.getOrPut(p.allocator, k); - if (!gop.found_existing) { - gop.value_ptr.* = v; - } else { - gop.value_ptr.count_estimate += v.count_estimate; - } - } - part.stmts = &.{}; - part.declared_symbols.entries.len = 0; - part.tag = .dead_due_to_inlining; - part.dependencies.clearRetainingCapacity(); - try part.dependencies.push(p.allocator, .{ - .part_index = @intCast(all_parts.len - 1), - .source_index = p.source.index, - }); - } - - try ctx.last_part.import_record_indices.append(p.allocator, p.import_records_for_current_part.items); - try ctx.last_part.declared_symbols.appendList(p.allocator, p.declared_symbols); - - ctx.last_part.stmts = ctx.stmts.items; - ctx.last_part.tag = .none; - - return all_parts; - } - }; - pub fn init( allocator: Allocator, log: *logger.Log, @@ -23933,7 +23627,7 @@ fn NewParser_( .define = define, .import_records = undefined, .named_imports = undefined, - .named_exports = js_ast.Ast.NamedExports.init(allocator), + .named_exports = .{}, .log = log, .allocator = allocator, .options = opts, @@ -23979,7 +23673,7 @@ fn NewParser_( if (comptime !only_scan_imports_and_do_not_visit) { this.import_records = @TypeOf(this.import_records).init(allocator); - this.named_imports = NamedImportsType.init(allocator); + this.named_imports = .{}; } this.to_expr_wrapper_namespace = Binding2ExprWrapper.Namespace.init(this); @@ -24095,6 +23789,316 @@ const WrapMode = enum { bun_commonjs, }; +pub const ConvertESMExportsForHmr = struct { + last_part: *js_ast.Part, + imports_seen: std.AutoArrayHashMapUnmanaged(u32, void) = .{}, + export_props: std.ArrayListUnmanaged(G.Property) = .{}, + stmts: std.ArrayListUnmanaged(Stmt) = .{}, + + fn convertStmt(ctx: *ConvertESMExportsForHmr, p: anytype, stmt: Stmt) !void { + const new_stmt = switch (stmt.data) { + else => stmt, + .s_local => |st| stmt: { + if (!st.is_export) break :stmt stmt; + + st.is_export = false; + + if (st.kind.isReassignable()) { + for (st.decls.slice()) |decl| { + try ctx.visitBindingForKitModuleExports(p, decl.binding, true); + } + } else { + // TODO: remove this dupe + var dupe_decls = try std.ArrayListUnmanaged(G.Decl).initCapacity(p.allocator, st.decls.len); + + for (st.decls.slice()) |decl| { + bun.assert(decl.value != null); // const must be initialized + + switch (decl.binding.data) { + .b_missing => {}, + + .b_identifier => |id| { + const symbol = p.symbols.items[id.ref.inner_index]; + + // if the symbol is not used, we don't need to preserve + // a binding in this scope. we can move it to the exports object. 
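As an aside, here is roughly what the two outcomes of the branch below look like in the converted module. This is an illustrative TypeScript sketch, not code from this patch; the module contents and names are invented:

```ts
declare function computeRoot(): string;

// Source module (sketch):
//   export const kPageSize = 4096;      // unused locally, initializer movable
//   export const kRoot = computeRoot(); // a call fails `canBeMoved`, so the decl stays

// After conversion, roughly (the exports wiring itself is emitted later, in finalize):
const kRoot = computeRoot();
const moduleExports = {
  kPageSize: 4096, // moved straight onto the exports object
  kRoot,           // declaration kept, exported as a plain (non-live) property
};
```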
+ if (symbol.use_count_estimate == 0 and decl.value.?.canBeMoved()) { + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ .data = symbol.original_name }, decl.binding.loc), + .value = decl.value, + }); + } else { + dupe_decls.appendAssumeCapacity(decl); + try ctx.visitBindingForKitModuleExports(p, decl.binding, false); + } + }, + + else => { + dupe_decls.appendAssumeCapacity(decl); + try ctx.visitBindingForKitModuleExports(p, decl.binding, false); + }, + } + } + + if (dupe_decls.items.len == 0) { + return; + } + + st.decls = G.Decl.List.fromList(dupe_decls); + } + + break :stmt stmt; + }, + .s_export_default => |st| stmt: { + // Simple case: we can move this to the default property of the exports object + if (st.canBeMoved()) { + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), + .value = st.value.toExpr(), + }); + // no statement emitted + return; + } + + // Otherwise, we need a temporary + const temp_id = p.generateTempRef("default_export"); + try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = temp_id, .is_top_level = true }); + try ctx.last_part.symbol_uses.putNoClobber(p.allocator, temp_id, .{ .count_estimate = 1 }); + try p.current_scope.generated.push(p.allocator, temp_id); + + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), + .value = Expr.initIdentifier(temp_id, stmt.loc), + }); + + break :stmt Stmt.alloc(S.Local, .{ + .kind = .k_const, + .decls = try G.Decl.List.fromSlice(p.allocator, &.{ + .{ + .binding = Binding.alloc(p.allocator, B.Identifier{ .ref = temp_id }, stmt.loc), + .value = st.value.toExpr(), + }, + }), + }, stmt.loc); + }, + .s_class => |st| stmt: { + // Strip the "export" keyword + if (!st.is_export) break :stmt stmt; + + // Export as CommonJS + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ + .data = p.symbols.items[st.class.class_name.?.ref.?.inner_index].original_name, + }, stmt.loc), + .value = Expr.initIdentifier(st.class.class_name.?.ref.?, stmt.loc), + }); + + st.is_export = false; + + break :stmt stmt; + }, + .s_function => |st| stmt: { + // Strip the "export" keyword + if (!st.func.flags.contains(.is_export)) break :stmt stmt; + + st.func.flags.remove(.is_export); + + // Export as CommonJS + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ + .data = p.symbols.items[st.func.name.?.ref.?.inner_index].original_name, + }, stmt.loc), + .value = Expr.initIdentifier(st.func.name.?.ref.?, stmt.loc), + }); + + break :stmt stmt; + }, + .s_export_clause => |st| { + for (st.items) |item| { + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ + .data = item.alias, + }, stmt.loc), + .value = Expr.initIdentifier(item.name.ref.?, item.name.loc), + }); + } + + return; // do not emit a statement here + }, + + .s_export_from => { + bun.todoPanic(@src(), "hot-module-reloading instrumentation for 'export {{ ... }} from'", .{}); + }, + .s_export_star => { + bun.todoPanic(@src(), "hot-module-reloading instrumentation for 'export * from'", .{}); + }, + + // De-duplicate import statements. 
It is okay to disregard + named/default imports here as we always rewrite them as + fully qualified property accesses (needed for live bindings) + .s_import => |st| stmt: { + const gop = try ctx.imports_seen.getOrPut(p.allocator, st.import_record_index); + if (gop.found_existing) return; + break :stmt stmt; + }, + }; + + try ctx.stmts.append(p.allocator, new_stmt); + } + + fn visitBindingForKitModuleExports( + ctx: *ConvertESMExportsForHmr, + p: anytype, + binding: Binding, + is_live_binding: bool, + ) !void { + switch (binding.data) { + .b_missing => {}, + .b_identifier => |id| { + try ctx.visitRefForKitModuleExports(p, id.ref, binding.loc, is_live_binding); + }, + .b_array => |array| { + for (array.items) |item| { + try ctx.visitBindingForKitModuleExports(p, item.binding, is_live_binding); + } + }, + .b_object => |object| { + for (object.properties) |item| { + try ctx.visitBindingForKitModuleExports(p, item.value, is_live_binding); + } + }, + } + } + + fn visitRefForKitModuleExports( + ctx: *ConvertESMExportsForHmr, + p: anytype, + ref: Ref, + loc: logger.Loc, + is_live_binding: bool, + ) !void { + const symbol = p.symbols.items[ref.inner_index]; + const id = Expr.initIdentifier(ref, loc); + if (is_live_binding) { + const key = Expr.init(E.String, .{ + .data = symbol.original_name, + }, loc); + + // This is technically incorrect in that we've marked this as a + // top level symbol, but all we care about is preventing name + // collisions, not necessarily the best minification (dev only) + const arg1 = p.generateTempRef(symbol.original_name); + try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = arg1, .is_top_level = true }); + try ctx.last_part.symbol_uses.putNoClobber(p.allocator, arg1, .{ .count_estimate = 1 }); + try p.current_scope.generated.push(p.allocator, arg1); + + // Live bindings need to update the value internally and externally.
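Concretely, the accessor pair assembled just below corresponds to this emitted JavaScript, shown as a TypeScript sketch; `abc` matches the placeholder name used in the comments, and `abc2` stands for the generated temporary:

```ts
let abc = 0; // module-local binding behind `export let abc = 0`

// finalize() later attaches these properties to module.exports:
const liveBindingProps = {
  // external reads always observe the current local value
  get abc() { return abc; },
  // external writes update the module-local binding as well
  set abc(abc2: number) { abc = abc2; },
};
```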
+ // 'get abc() { return abc }' + try ctx.export_props.append(p.allocator, .{ + .kind = .get, + .key = key, + .value = Expr.init(E.Function, .{ .func = .{ + .body = .{ + .stmts = try p.allocator.dupe(Stmt, &.{ + Stmt.alloc(S.Return, .{ .value = id }, loc), + }), + .loc = loc, + }, + } }, loc), + }); + // 'set abc(abc2) { abc = abc2 }' + try ctx.export_props.append(p.allocator, .{ + .kind = .set, + .key = key, + .value = Expr.init(E.Function, .{ .func = .{ + .args = try p.allocator.dupe(G.Arg, &.{.{ + .binding = Binding.alloc(p.allocator, B.Identifier{ .ref = arg1 }, loc), + }}), + .body = .{ + .stmts = try p.allocator.dupe(Stmt, &.{ + Stmt.alloc(S.SExpr, .{ + .value = Expr.assign(id, Expr.initIdentifier(arg1, loc)), + }, loc), + }), + .loc = loc, + }, + } }, loc), + }); + } else { + // 'abc,' + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ + .data = symbol.original_name, + }, loc), + .value = id, + }); + } + } + + pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.Part) ![]js_ast.Part { + if (ctx.export_props.items.len > 0) { + // add a marker so the client runtime can tell that this is an ES module + try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{ + .value = Expr.assign( + Expr.init(E.Dot, .{ + .target = Expr.initIdentifier(p.module_ref, logger.Loc.Empty), + .name = "__esModule", + .name_loc = logger.Loc.Empty, + }, logger.Loc.Empty), + Expr.init(E.Boolean, .{ .value = true }, logger.Loc.Empty), + ), + }, logger.Loc.Empty)); + + try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{ + .value = Expr.assign( + Expr.init(E.Dot, .{ + .target = Expr.initIdentifier(p.module_ref, logger.Loc.Empty), + .name = "exports", + .name_loc = logger.Loc.Empty, + }, logger.Loc.Empty), + Expr.init(E.Object, .{ + .properties = G.Property.List.fromList(ctx.export_props), + }, logger.Loc.Empty), + ), + }, logger.Loc.Empty)); + + // mark a dependency on module_ref so it is renamed + try ctx.last_part.symbol_uses.put(p.allocator, p.module_ref, .{ .count_estimate = 1 }); + try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = p.module_ref, .is_top_level = true }); + } + + // TODO: this is a tiny mess. it is honestly trying too hard to merge all parts into one + for (all_parts[0 ..
all_parts.len - 1]) |*part| { + try ctx.last_part.declared_symbols.appendList(p.allocator, part.declared_symbols); + try ctx.last_part.import_record_indices.append(p.allocator, part.import_record_indices.slice()); + for (part.symbol_uses.keys(), part.symbol_uses.values()) |k, v| { + const gop = try ctx.last_part.symbol_uses.getOrPut(p.allocator, k); + if (!gop.found_existing) { + gop.value_ptr.* = v; + } else { + gop.value_ptr.count_estimate += v.count_estimate; + } + } + part.stmts = &.{}; + part.declared_symbols.entries.len = 0; + part.tag = .dead_due_to_inlining; + part.dependencies.clearRetainingCapacity(); + try part.dependencies.push(p.allocator, .{ + .part_index = @intCast(all_parts.len - 1), + .source_index = p.source.index, + }); + } + + try ctx.last_part.import_record_indices.append(p.allocator, p.import_records_for_current_part.items); + try ctx.last_part.declared_symbols.appendList(p.allocator, p.declared_symbols); + + ctx.last_part.stmts = ctx.stmts.items; + ctx.last_part.tag = .none; + + return all_parts; + } +}; + /// Equivalent of esbuild's js_ast_helpers.ToInt32 fn floatToInt32(f: f64) i32 { // Special-case non-finite numbers diff --git a/src/js_printer.zig b/src/js_printer.zig index 976a29ddd751b..6656faacb2e31 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -518,7 +518,6 @@ pub const Options = struct { require_ref: ?Ref = null, import_meta_ref: Ref = Ref.None, indent: Indentation = .{}, - externals: []u32 = &[_]u32{}, runtime_imports: runtime.Runtime.Imports = runtime.Runtime.Imports{}, module_hash: u32 = 0, source_path: ?fs.Path = null, @@ -1092,11 +1091,6 @@ fn NewPrinter( printInternalBunImport(p, import, @TypeOf("globalThis.Bun"), "globalThis.Bun"); } - fn printHardcodedImportStatement(p: *Printer, import: S.Import) void { - if (comptime !is_bun_platform) unreachable; - printInternalBunImport(p, import, void, {}); - } - fn printInternalBunImport(p: *Printer, import: S.Import, comptime Statement: type, statement: Statement) void { if (comptime !is_bun_platform) unreachable; @@ -2039,7 +2033,9 @@ fn NewPrinter( p.print(".require("); { const path = input_files[record.source_index.get()].path; - p.printInlinedEnum(.{ .number = @floatFromInt(path.hashForKit()) }, path.pretty, level); + p.print('"'); + p.printUTF8StringEscapedQuotes(path.pretty, '"'); + p.print('"'); } p.print(")"); } else if (!meta.was_unwrapped_require) { @@ -2085,18 +2081,12 @@ fn NewPrinter( return; } - const is_external = std.mem.indexOfScalar( - u32, - p.options.externals, - import_record_index, - ) != null; - // External "require()" if (record.kind != .dynamic) { p.printSpaceBeforeIdentifier(); if (p.options.inline_require_and_import_errors) { - if (record.path.is_disabled and record.handles_import_errors and !is_external) { + if (record.path.is_disabled and record.handles_import_errors) { p.printRequireError(record.path.text); return; } @@ -2107,6 +2097,23 @@ fn NewPrinter( } } + if (p.options.module_type == .internal_kit_dev) { + p.printSpaceBeforeIdentifier(); + p.printSymbol(p.options.commonjs_module_ref); + if (record.tag == .builtin) + p.print(".importBuiltin(") + else + p.print(".require("); + { + const path = record.path; + p.print('"'); + p.printUTF8StringEscapedQuotes(path.pretty, '"'); + p.print('"'); + } + p.print(")"); + return; + } + if (p.options.module_type == .esm and is_bun_platform) { p.print("import.meta.require"); } else if (p.options.require_ref) |ref| { @@ -2626,7 +2633,12 @@ fn NewPrinter( p.printSpaceBeforeIdentifier(); p.addSourceMapping(expr.loc); - 
p.print("import("); + if (p.options.module_type == .internal_kit_dev) { + p.printSymbol(p.options.commonjs_module_ref); + p.print(".dynamicImport("); + } else { + p.print("import("); + } // TODO: // if (e.leading_interior_comments.len > 0) { // p.printNewline(); @@ -3110,6 +3122,10 @@ fn NewPrinter( e.ref; const symbol = p.symbols().get(ref).?; + // if (bun.strings.eql(symbol.original_name, "registerClientReference")) { + // @breakpoint(); + // } + if (symbol.import_item_status == .missing) { p.printUndefined(expr.loc, level); didPrint = true; @@ -3444,13 +3460,8 @@ fn NewPrinter( } } - pub fn printNamespaceAlias(p: *Printer, import_record: ImportRecord, namespace: G.NamespaceAlias) void { - if (import_record.module_id > 0 and !import_record.contains_import_star) { - p.print("$"); - p.printModuleId(import_record.module_id); - } else { - p.printSymbol(namespace.namespace_ref); - } + pub fn printNamespaceAlias(p: *Printer, _: ImportRecord, namespace: G.NamespaceAlias) void { + p.printSymbol(namespace.namespace_ref); // In the case of code like this: // module.exports = require("foo") @@ -3710,9 +3721,10 @@ fn NewPrinter( } } }, - // .e_import_identifier => |e| inner: { - .e_import_identifier => |e| { + .e_import_identifier => |e| inner: { const ref = p.symbols().follow(e.ref); + if (p.options.input_files_for_kit != null) + break :inner; // if (p.options.const_values.count() > 0 and p.options.const_values.contains(ref)) // break :inner; @@ -4345,62 +4357,6 @@ fn NewPrinter( const import_record = p.importRecord(s.import_record_index); - if (comptime is_bun_platform) { - if (import_record.do_commonjs_transform_in_printer) { - if (s.items.len == 0) - return; - p.print("var {"); - var symbol_counter: u32 = p.symbol_counter; - - for (s.items, 0..) |item, i| { - if (i > 0) { - p.print(","); - } - - p.print(item.original_name); - assert(item.original_name.len > 0); - p.print(":"); - // this is unsound - // this is technical debt - // we need to handle symbol collisions for this - p.print("$eXp0rT_"); - var buf: [16]u8 = undefined; - p.print(std.fmt.bufPrint(&buf, "{}", .{bun.fmt.hexIntLower(symbol_counter)}) catch unreachable); - symbol_counter +|= 1; - } - - p.print("}=import.meta.require("); - p.printImportRecordPath(import_record); - p.print(")"); - p.printSemicolonAfterStatement(); - p.printWhitespacer(ws("export {")); - - // reset symbol counter back - symbol_counter = p.symbol_counter; - - for (s.items, 0..) 
|item, i| { - if (i > 0) { - p.print(","); - } - - // this is unsound - // this is technical debt - // we need to handle symbol collisions for this - p.print("$eXp0rT_"); - var buf: [16]u8 = undefined; - p.print(std.fmt.bufPrint(&buf, "{}", .{bun.fmt.hexIntLower(symbol_counter)}) catch unreachable); - symbol_counter +|= 1; - p.printWhitespacer(ws(" as ")); - p.print(item.alias); - } - - p.print("}"); - p.printSemicolonAfterStatement(); - p.symbol_counter = symbol_counter; - return; - } - } - p.printWhitespacer(ws("export {")); if (!s.is_single_line) { @@ -4755,47 +4711,17 @@ fn NewPrinter( p.printGlobalBunImportStatement(s.*); return; }, - // .hardcoded => { - // p.printHardcodedImportStatement(s.*); - // return; - // }, else => {}, } } - if (record.do_commonjs_transform_in_printer or record.path.is_disabled) { - const require_ref = p.options.require_ref; - - const module_id = record.module_id; - - if (!record.path.is_disabled and std.mem.indexOfScalar(u32, p.imported_module_ids.items, module_id) == null) { - p.printWhitespacer(ws("import * as")); - p.print(" "); - p.printModuleId(module_id); - p.print(" "); - p.printWhitespacer(ws("from ")); - p.print("\""); - p.print(record.path.text); - p.print("\""); - p.printSemicolonAfterStatement(); - try p.imported_module_ids.append(module_id); - } - + if (record.path.is_disabled) { if (record.contains_import_star) { p.print("var "); p.printSymbol(s.namespace_ref); p.@"print = "(); - if (!record.path.is_disabled) { - p.printSymbol(require_ref.?); - p.print("("); - p.printModuleId(module_id); - - p.print(");"); - p.printNewline(); - } else { - p.printDisabledImport(); - p.printSemicolonAfterStatement(); - } + p.printDisabledImport(); + p.printSemicolonAfterStatement(); } if (s.items.len > 0 or s.default_name != null) { @@ -4839,12 +4765,6 @@ fn NewPrinter( if (record.contains_import_star) { p.printSymbol(s.namespace_ref); p.printSemicolonAfterStatement(); - } else if (!record.path.is_disabled) { - p.printSymbol(require_ref.?); - p.print("("); - p.printModuleId(module_id); - p.print(")"); - p.printSemicolonAfterStatement(); } else { p.printDisabledImport(); p.printSemicolonAfterStatement(); @@ -5056,13 +4976,7 @@ fn NewPrinter( unreachable; const quote = bestQuoteCharForString(u8, import_record.path.text, false); - if (import_record.print_namespace_in_path and import_record.module_id != 0) { - p.print(quote); - p.print(import_record.path.namespace); - p.print(":"); - p.printModuleIdAssumeEnabled(import_record.module_id); - p.print(quote); - } else if (import_record.print_namespace_in_path and !import_record.path.isFile()) { + if (import_record.print_namespace_in_path and !import_record.path.isFile()) { p.print(quote); p.print(import_record.path.namespace); p.print(":"); @@ -6433,8 +6347,9 @@ pub fn printWithWriterAndPlatform( if (opts.module_type == .internal_kit_dev) { printer.indent(); printer.printIndent(); - printer.fmt("{d}", .{source.path.hashForKit()}) catch bun.outOfMemory(); - printer.print(": function"); + printer.print('"'); + printer.printUTF8StringEscapedQuotes(source.path.pretty, '"'); + printer.print('"'); printer.printFunc(parts[0].stmts[0].data.s_expr.value.data.e_function.func); printer.print(",\n"); } else { diff --git a/src/kit/DevServer.zig b/src/kit/DevServer.zig index 804a8803b492c..4e9b6989e39b6 100644 --- a/src/kit/DevServer.zig +++ b/src/kit/DevServer.zig @@ -1,5 +1,5 @@ //! Instance of the development server. Controls an event loop, web server, -//! bundling threads, and JavaScript VM instance. All data is held in memory. 
+//! bundling state, and JavaScript VM instance. All work is cached in-memory. //! //! Currently does not have a `deinit()`, as it is assumed to be alive for the //! remainder of this process' lifespan. @@ -8,10 +8,11 @@ pub const DevServer = @This(); pub const Options = struct { cwd: []u8, routes: []Route, + framework: kit.Framework, listen_config: uws.AppListenConfig = .{ .port = 3000 }, dump_sources: ?[]const u8 = if (Environment.isDebug) ".kit-debug" else null, verbose_watcher: bool = false, - // TODO: make it possible to inherit a js VM + // TODO: make it required to inherit a js VM }; /// Accepting a custom allocator for all of DevServer would be misleading @@ -19,7 +20,6 @@ pub const Options = struct { const default_allocator = bun.default_allocator; cwd: []const u8, -dump_dir: ?std.fs.Dir, // UWS App app: *App, @@ -33,16 +33,32 @@ listener: ?*App.ListenSocket, // Server Runtime server_global: *DevGlobalObject, vm: *VirtualMachine, +/// This is a handle to the server_fetch_function, which is shared +/// across all loaded modules. Its type is `(Request, Id) => Response` +server_fetch_function_callback: JSC.Strong, +server_register_update_callback: JSC.Strong, // Bundling -bundle_thread: BundleThread, +client_graph: IncrementalGraph(.client), +server_graph: IncrementalGraph(.server), +framework: kit.Framework, +bun_watcher: *JSC.Watcher, +server_bundler: Bundler, +client_bundler: Bundler, +ssr_bundler: Bundler, +/// Stored and reused for bundling tasks +log: Log, + +/// To reduce complexity of BundleV2's return type being different on +/// compile-time logic, extra kit-specific metadata is returned through a +/// pointer to DevServer, and writing directly to this field. +/// +/// Only one bundle is run at a time (batched with all files needed), +/// so there is never contention. +bundle_result: ?ExtraBundleData, -// // Watch + HMR -// bun_watcher: *HotReloader.Watcher, -/// Required by `bun.JSC.NewHotReloader` -bundler: Bundler, -/// Required by `Bundler` -log_do_not_use: Log, +// Debugging +dump_dir: ?std.fs.Dir, pub const internal_prefix = "/_bun"; pub const client_prefix = internal_prefix ++ "/client"; @@ -52,8 +68,10 @@ pub const Route = struct { pattern: [:0]const u8, entry_point: []const u8, - server_bundle: BundlePromise(ServerBundle) = .unqueued, - client_bundle: BundlePromise(ClientBundle) = .unqueued, + bundle: BundleState = .stale, + client_files: std.AutoArrayHashMapUnmanaged(IncrementalGraph(.client).Index, void) = .{}, + server_files: std.AutoArrayHashMapUnmanaged(IncrementalGraph(.server).Index, void) = .{}, + module_name_string: ?bun.String = null, /// Assigned in DevServer.init dev: *DevServer = undefined, @@ -62,31 +80,46 @@ pub const Route = struct { pub fn clientPublicPath(route: *const Route) []const u8 { return route.client_bundled_url[0 .. route.client_bundled_url.len - "/client.js".len]; } -}; -/// Prepared server-side bundle and loaded JavaScript module -const ServerBundle = struct { - files: []OutputFile, - server_request_callback: JSC.JSValue, + pub const Index = enum(u32) { _ }; }; -/// Preparred client-side bundle. -/// Namespaced to URL: `/_bun/client/:route_index/:file_path` -const ClientBundle = struct { - files: []OutputFile, - /// Indexes into this are indexes into `files`. - /// This is case insensitive because URL paths should be case insensitive. 
- files_index: bun.CaseInsensitiveASCIIStringArrayHashMapUnmanaged(void), - - pub fn getFile(bundle: *ClientBundle, filename: []const u8) ?*OutputFile { - return if (bundle.files_index.getIndex(filename)) |i| - &bundle.files[i] - else - null; +/// Three-way maybe state +const BundleState = union(enum) { + /// Bundled assets are not prepared + stale, + /// Build failure + fail: Failure, + + ready: Bundle, + + fn reset(s: *BundleState) void { + switch (s.*) { + .stale => return, + .fail => |f| f.deinit(), + .ready => |b| b.deinit(), + } + s.* = .stale; } + + const NonStale = union(enum) { + /// Build failure + fail: Failure, + ready: Bundle, + }; +}; + +const Bundle = struct { + /// Backed by default_allocator. + client_bundle: []const u8, }; -pub fn init(options: Options) *DevServer { +pub fn init(options: Options) !*DevServer { + { + @panic("Behavior Regressed due to Watcher Changes"); + } + + bun.analytics.Features.kit_dev +|= 1; if (JSC.VirtualMachine.VMHolder.vm != null) @panic("Cannot initialize kit.DevServer on a thread with an active JSC.VirtualMachine"); @@ -101,6 +134,8 @@ pub fn init(options: Options) *DevServer { const app = App.create(.{}); + const separate_ssr_graph = if (options.framework.server_components) |sc| sc.separate_ssr_graph else false; + const dev = bun.new(DevServer, .{ .cwd = options.cwd, .app = app, @@ -109,46 +144,44 @@ pub fn init(options: Options) *DevServer { .port = @intCast(options.listen_config.port), .hostname = options.listen_config.host orelse "localhost", }, + .server_fetch_function_callback = .{}, + .server_register_update_callback = .{}, .listener = null, - .bundle_thread = BundleThread.uninitialized, + .log = Log.init(default_allocator), + .client_graph = undefined, + .server_graph = undefined, + .dump_dir = dump_dir, + .framework = options.framework, + .bundle_result = null, + .server_global = undefined, .vm = undefined, - .dump_dir = dump_dir, - // .bun_watcher = undefined, - .bundler = undefined, - .log_do_not_use = Log.init(bun.failing_allocator), + .bun_watcher = undefined, + .server_bundler = undefined, + .client_bundler = undefined, + .ssr_bundler = undefined, }); - dev.bundler = bun.Bundler.init( - default_allocator, - &dev.log_do_not_use, - std.mem.zeroes(bun.Schema.Api.TransformOptions), - null, // TODO: - ) catch bun.outOfMemory(); - - const loaders = bun.options.loadersFromTransformOptions(default_allocator, null, .bun) catch - bun.outOfMemory(); - - dev.bundler.options = .{ - .entry_points = &.{}, - .define = dev.bundler.options.define, - .loaders = loaders, - .log = &dev.log_do_not_use, - .output_dir = "", // this disables filesystem output - .output_format = .internal_kit_dev, - .out_extensions = bun.StringHashMap([]const u8).init(bun.failing_allocator), - - // unused by all code - .resolve_mode = .dev, - // technically used (in macro) but should be removed - .transform_options = std.mem.zeroes(bun.Schema.Api.TransformOptions), - }; - dev.bundler.configureLinker(); - dev.bundler.resolver.opts = dev.bundler.options; + dev.server_graph = .{ .owner = dev }; + dev.client_graph = .{ .owner = dev }; - // const fs = bun.fs.FileSystem.init(options.cwd) catch @panic("Failed to init FileSystem"); + // const fs = try bun.fs.FileSystem.init(options.cwd); // dev.bun_watcher = HotReloader.init(dev, fs, options.verbose_watcher, false); - // dev.bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); + // dev.server_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); + // dev.client_bundler.resolver.watcher = 
dev.bun_watcher.getResolveWatcher(); + + try dev.initBundler(&dev.server_bundler, .server); + try dev.initBundler(&dev.client_bundler, .client); + if (separate_ssr_graph) + try dev.initBundler(&dev.ssr_bundler, .ssr); + + dev.framework = dev.framework.resolve( + &dev.server_bundler.resolver, + &dev.client_bundler.resolver, + ) catch { + Output.errGeneric("Failed to resolve all imports required by the framework", .{}); + return error.FrameworkInitialization; + }; dev.vm = VirtualMachine.initKit(.{ .allocator = default_allocator, @@ -161,11 +194,6 @@ pub fn init(options: Options) *DevServer { dev.vm.jsc = dev.vm.global.vm(); dev.vm.event_loop.ensureWaker(); - _ = JSC.WorkPool.get(); - const thread = dev.bundle_thread.spawn() catch |err| - Output.panic("Failed to spawn bundler thread: {}", .{err}); - thread.detach(); - var has_fallback = false; for (options.routes, 0..) |*route, i| { @@ -199,6 +227,57 @@ pub fn init(options: Options) *DevServer { return dev; } +fn initBundler(dev: *DevServer, bundler: *Bundler, comptime renderer: kit.Renderer) !void { + const framework = dev.framework; + + bundler.* = try bun.Bundler.init( + default_allocator, // TODO: this is likely a memory leak + &dev.log, + std.mem.zeroes(bun.Schema.Api.TransformOptions), + null, // TODO: + ); + + bundler.options.target = switch (renderer) { + .client => .browser, + .server, .ssr => .bun, + }; + bundler.options.public_path = switch (renderer) { + .client => client_prefix, + .server, .ssr => dev.cwd, + }; + bundler.options.entry_points = &.{}; + bundler.options.log = &dev.log; + bundler.options.output_dir = ""; // this disables filesystem output; + bundler.options.entry_naming = "bundle.js"; // unused output file generation is skipped + bundler.options.output_format = .internal_kit_dev; + bundler.options.out_extensions = bun.StringHashMap([]const u8).init(bundler.allocator); + bundler.options.hot_module_reloading = true; + + bundler.options.react_fast_refresh = renderer == .client and framework.react_fast_refresh != null; + bundler.options.server_components = framework.server_components != null; + + bundler.options.conditions = try bun.options.ESMConditions.init(default_allocator, bundler.options.target.defaultConditions()); + if (renderer == .server and framework.server_components != null) { + try bundler.options.conditions.appendSlice(&.{"react-server"}); + } + + bundler.options.tree_shaking = false; + bundler.options.minify_syntax = true; + bundler.options.minify_identifiers = false; + bundler.options.minify_whitespace = false; + bundler.options.kit = dev; + + bundler.configureLinker(); + try bundler.configureDefines(); + + try kit.addImportMetaDefines(default_allocator, bundler.options.define, .development, switch (renderer) { + .client => .client, + .server, .ssr => .server, + }); + + bundler.resolver.opts = bundler.options; +} + pub fn runLoopForever(dev: *DevServer) noreturn { const lock = dev.vm.jsc.getAPILock(); defer lock.release(); @@ -213,7 +292,7 @@ pub fn runLoopForever(dev: *DevServer) noreturn { fn onListen(ctx: *DevServer, maybe_listen: ?*App.ListenSocket) void { const listen: *App.ListenSocket = maybe_listen orelse { - @panic("TODO: handle listen failure"); + bun.todoPanic(@src(), "handle listen failure", .{}); }; ctx.listener = listen; @@ -235,31 +314,196 @@ fn onAssetRequestInit(dev: *DevServer, req: *Request, resp: *Response) void { return req.setYield(true); break :route &dev.routes[i]; }; - const asset_name = req.parameter(1); - dev.getOrEnqueueBundle(resp, route, .client, .{ .file_name = asset_name 
}); + // const asset_name = req.parameter(1); + switch (route.dev.getRouteBundle(route)) { + .ready => |bundle| { + sendJavaScriptSource(bundle.client_bundle, resp); + }, + .fail => |fail| { + fail.sendAsHttpResponse(resp, route); + }, + } } fn onServerRequestInit(route: *Route, req: *Request, resp: *Response) void { - _ = req; - route.dev.getOrEnqueueBundle(resp, route, .server, .{}); + switch (route.dev.getRouteBundle(route)) { + .ready => |ready| { + onServerRequestWithBundle(route, ready, req, resp); + }, + .fail => |fail| { + fail.sendAsHttpResponse(resp, route); + }, + } } -// uws with bundle handlers +const ExtraBundleData = struct {}; + +fn getRouteBundle(dev: *DevServer, route: *Route) BundleState.NonStale { + if (route.bundle == .stale) { + var fail: Failure = .{ + .zig_error = error.FileNotFound, + }; + route.bundle = bundle: { + const success = dev.performBundleAndWaitInner(route, &fail) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + if (fail == .zig_error) { + if (dev.log.hasAny()) { + fail = Failure.fromLog(&dev.log); + } else { + fail = .{ .zig_error = err }; + } + } + fail.printToConsole(route); + break :bundle .{ .fail = fail }; + }; + break :bundle .{ .ready = success }; + }; + } + return switch (route.bundle) { + .stale => unreachable, + .fail => |fail| .{ .fail = fail }, + .ready => |ready| .{ .ready = ready }, + }; +} + +/// Error handling is done either by writing to `fail` with a specific failure, +/// or by appending to `dev.log`. The caller, `getRouteBundle`, will handle the +/// error, including replying to the request as well as console logging. +fn performBundleAndWaitInner(dev: *DevServer, route: *Route, fail: *Failure) !Bundle { + var heap = try ThreadlocalArena.init(); + defer heap.deinit(); + + const allocator = heap.allocator(); + var ast_memory_allocator = try allocator.create(bun.JSAst.ASTMemoryAllocator); + ast_memory_allocator.* = .{ .allocator = allocator }; + ast_memory_allocator.reset(); + ast_memory_allocator.push(); + + if (dev.framework.server_components == null) { + // The handling of the dependency graph is SLIGHTLY different. It's + // enough that it would be incorrect to let the current code execute at + // all. + bun.todoPanic(@src(), "support non-server components build", .{}); + } + + const bv2 = try BundleV2.init( + &dev.server_bundler, + if (dev.framework.server_components != null) .{ + .framework = dev.framework, + .client_bundler = &dev.client_bundler, + .ssr_bundler = &dev.ssr_bundler, + } else @panic("TODO: support non-server components"), + allocator, + JSC.AnyEventLoop.init(allocator), + false, // reloading is handled separately + JSC.WorkPool.get(), + heap, + ); + bv2.bun_watcher = dev.bun_watcher; + // this.plugins = completion.plugins; + + defer { + if (bv2.graph.pool.pool.threadpool_context == @as(?*anyopaque, @ptrCast(bv2.graph.pool))) { + bv2.graph.pool.pool.threadpool_context = null; + } + ast_memory_allocator.pop(); + bv2.deinit(); + } + + errdefer { + // Wait for wait groups to finish. There still may be ongoing work. 
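Before the error-handling details continue, it may help to see how `getRouteBundle` above memoizes this function's result. A hypothetical TypeScript model of the three-way state (type and helper names are invented):

```ts
type Failure = { message: string };
type BundleState =
  | { tag: "stale" }                        // assets not prepared, or invalidated
  | { tag: "fail"; failure: Failure }       // build error, replayed to each request
  | { tag: "ready"; clientBundle: string }; // joined chunks, served as-is
interface Route { bundle: BundleState }

declare function performBundle(route: Route): string; // performBundleAndWaitInner
declare function toFailure(err: unknown): Failure;

function getRouteBundle(route: Route): BundleState {
  if (route.bundle.tag === "stale") {
    try {
      route.bundle = { tag: "ready", clientBundle: performBundle(route) };
    } catch (err) {
      route.bundle = { tag: "fail", failure: toFailure(err) }; // logged once, here
    }
  }
  return route.bundle; // never "stale" past this point
}
```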
+ bv2.linker.source_maps.line_offset_wait_group.wait(); + bv2.linker.source_maps.quoted_contents_wait_group.wait(); + } + + const output_files = try bv2.runFromJSInNewThread(&.{ + route.entry_point, + dev.framework.entry_server.?, + }, &.{ + dev.framework.entry_client.?, + }); -fn onAssetRequestWithBundle(route: *Route, resp: *Response, ctx: BundleKind.client.Context(), bundle: *ClientBundle) void { - _ = route; + try dev.client_graph.ensureStaleBitCapacity(); + try dev.server_graph.ensureStaleBitCapacity(); - const file = bundle.getFile(ctx.file_name) orelse - return sendBuiltInNotFound(resp); + assert(output_files.items.len == 0); - sendOutputFile(file, resp); + bv2.bundler.log.printForLogLevel(Output.errorWriter()) catch {}; + bv2.client_bundler.log.printForLogLevel(Output.errorWriter()) catch {}; + + const server_bundle = try dev.server_graph.takeBundle(.initial_response); + defer default_allocator.free(server_bundle); + + const client_bundle = try dev.client_graph.takeBundle(.initial_response); + errdefer default_allocator.free(client_bundle); + + if (dev.log.hasAny()) { + dev.log.printForLogLevel(Output.errorWriter()) catch {}; + } + + const server_code = c.KitLoadServerCode(dev.server_global, bun.String.createLatin1(server_bundle)); + dev.vm.waitForPromise(.{ .internal = server_code.promise }); + + switch (server_code.promise.unwrap(dev.vm.jsc, .mark_handled)) { + .pending => unreachable, // promise is settled + .rejected => |err| { + fail.* = Failure.fromJSServerLoad(err, dev.server_global.js()); + return error.ServerJSLoad; + }, + .fulfilled => |v| bun.assert(v == .undefined), + } + + if (route.module_name_string == null) { + route.module_name_string = bun.String.createUTF8(bun.path.relative(dev.cwd, route.entry_point)); + } + + if (!dev.server_fetch_function_callback.has()) { + const default_export = c.KitGetRequestHandlerFromModule(dev.server_global, server_code.key); + if (!default_export.isObject()) + @panic("Internal assertion failure: expected interface from HMR runtime to be an object"); + const fetch_function: JSValue = default_export.get(dev.server_global.js(), "handleRequest") orelse + @panic("Internal assertion failure: expected interface from HMR runtime to contain handleRequest"); + bun.assert(fetch_function.isCallable(dev.vm.jsc)); + dev.server_fetch_function_callback = JSC.Strong.create(fetch_function, dev.server_global.js()); + const register_update = default_export.get(dev.server_global.js(), "registerUpdate") orelse + @panic("Internal assertion failure: expected interface from HMR runtime to contain registerUpdate"); + dev.server_register_update_callback = JSC.Strong.create(register_update, dev.server_global.js()); + + fetch_function.ensureStillAlive(); + register_update.ensureStillAlive(); + } else { + bun.todoPanic(@src(), "Kit: server's secondary bundle", .{}); + } + + return .{ + .client_bundle = client_bundle, + }; } -fn onServerRequestWithBundle(route: *Route, resp: *Response, ctx: BundleKind.server.Context(), bundle: *ServerBundle) void { - _ = ctx; // autofix +pub fn receiveChunk( + dev: *DevServer, + abs_path: []const u8, + side: kit.Renderer, + chunk: bun.bundle_v2.CompileResult, +) !void { + return switch (side) { + .server => dev.server_graph.addChunk(abs_path, chunk, false), + .ssr => dev.server_graph.addChunk(abs_path, chunk, true), + .client => dev.client_graph.addChunk(abs_path, chunk, false), + }; +} + +// uws with bundle handlers + +fn onServerRequestWithBundle(route: *Route, bundle: Bundle, req: *Request, resp: *Response) void { + _ = bundle; + 
_ = req; const dev = route.dev; const global = dev.server_global.js(); + const server_request_callback = dev.server_fetch_function_callback.get() orelse + unreachable; // did not bundle + const context = JSValue.createEmptyObject(global, 1); context.put( dev.server_global.js(), @@ -267,18 +511,35 @@ fn onServerRequestWithBundle(route: *Route, resp: *Response, ctx: BundleKind.ser bun.String.init(route.client_bundled_url).toJS(global), ); - const result = bundle.server_request_callback.call( + var result = server_request_callback.call( global, .undefined, - &.{context}, + &.{ + context, + route.module_name_string.?.toJS(dev.server_global.js()), + }, ) catch |err| { const exception = global.takeException(err); const fail: Failure = .{ .request_handler = exception }; - fail.printToConsole(route, .server); - fail.sendAsHttpResponse(resp, route, .server); + fail.printToConsole(route); + fail.sendAsHttpResponse(resp, route); return; }; + if (result.asAnyPromise()) |promise| { + dev.vm.waitForPromise(promise); + switch (promise.unwrap(dev.vm.jsc, .mark_handled)) { + .pending => unreachable, // was waited for + .fulfilled => |r| result = r, + .rejected => |e| { + const fail: Failure = .{ .request_handler = e }; + fail.printToConsole(route); + fail.sendAsHttpResponse(resp, route); + return; + }, + } + } + // TODO: This interface and implementation is very poor. but fine until API // considerations become important (as of writing, there are 3 dozen todo // items before it) @@ -290,8 +551,10 @@ fn onServerRequestWithBundle(route: *Route, resp: *Response, ctx: BundleKind.ser // This would allow us to support all of the nice things `new Response` allows const bun_string = result.toBunString(dev.server_global.js()); - if (bun_string.tag == .Dead) @panic("TODO NOT STRING"); defer bun_string.deref(); + if (bun_string.tag == .Dead) { + bun.todoPanic(@src(), "Kit: support non-string return value", .{}); + } const utf8 = bun_string.toUTF8(default_allocator); defer utf8.deinit(); @@ -326,357 +589,246 @@ fn sendOutputFile(file: *const OutputFile, resp: *Response) void { } } +fn sendJavaScriptSource(code: []const u8, resp: *Response) void { + if (code.len == 0) { + resp.writeStatus("202 No Content"); + resp.writeHeaderInt("Content-Length", 0); + resp.end("", true); + return; + } + + resp.writeStatus("200 OK"); + // TODO: CSS, Sourcemap + resp.writeHeader("Content-Type", MimeType.javascript.value); + resp.end(code, true); // TODO: You should never call res.end(huge buffer) +} + fn sendBuiltInNotFound(resp: *Response) void { const message = "404 Not Found"; resp.writeStatus("404 Not Found"); resp.end(message, true); } -// bundling - -const BundleKind = enum { - client, - server, - - fn Bundle(kind: BundleKind) type { - return switch (kind) { - .client => ClientBundle, - .server => ServerBundle, - }; - } - - /// Routing information from uws.Request is stack allocated. - /// This union has no type tag because it can be inferred from surrounding data. - fn Context(kind: BundleKind) type { - return switch (kind) { - .client => struct { file_name: []const u8 }, +/// The paradigm of Kit's incremental state is to store a separate list of files +/// than the Graph in bundle_v2. When watch events happen, the bundler is run on +/// the changed files, excluding non-stale files via `isFileStale`. +/// +/// Upon bundle completion, both `client_graph` and `server_graph` have their +/// `addChunk` methods called with all new chunks, counting the total length +/// needed. 
A call to `takeBundle` joins all of the chunks, resulting in the +/// code to send to client or evaluate on the server. +/// +/// This approach was selected as it resulted in the fewest changes in the +/// bundler. It also allows the bundler to avoid memory buildup by ensuring its +/// arenas don't live too long. +/// +/// Since all routes share the two graphs, bundling a new route that shared +/// a module from a previously bundled route will perform the same exclusion +/// behavior that rebuilds use. This also ensures that two routes on the server +/// do not emit duplicate dependencies. By tracing `imports` on each file in +/// the module graph recursively, the full bundle for any given route can +/// be re-materialized (required when pressing Cmd+R after any client update) +pub fn IncrementalGraph(side: kit.Side) type { + return struct { + owner: *DevServer, + + bundled_files: bun.StringArrayHashMapUnmanaged(File) = .{}, + stale_files: bun.bit_set.DynamicBitSetUnmanaged = .{}, + + server_is_rsc: if (side == .server) bun.bit_set.DynamicBitSetUnmanaged else void = + if (side == .server) .{}, + server_is_ssr: if (side == .server) bun.bit_set.DynamicBitSetUnmanaged else void = + if (side == .server) .{}, + + /// Byte length of every file queued for concatenation + current_incremental_chunk_len: usize = 0, + current_incremental_chunk_parts: std.ArrayListUnmanaged(switch (side) { + .client => Index, + // these slices do not outlive the bundler, and must be joined + // before its arena is deinitialized. + .server => []const u8, + }) = .{}, + + /// An index into `bundled_files` or `stale_files` + pub const Index = enum(u32) { _ }; + + pub const File = switch (side) { + // The server's incremental graph does not store previously bundled + // code because there is only one instance of the server. 
Instead, + // it stores which .server => struct {}, - }; - } + .client => struct { + /// allocated by default_allocator + code: []const u8, + // /// To re-assemble a stale bundle (browser hard-reload), follow this recursively + // imports: []Index, - inline fn completionFunction(comptime kind: BundleKind) fn (*Route, *Response, kind.Context(), *kind.Bundle()) void { - return switch (kind) { - .client => onAssetRequestWithBundle, - .server => onServerRequestWithBundle, + // routes: u32, + }, }; - } - const AnyContext: type = @Type(.{ - .Union = .{ - .layout = .auto, - .tag_type = null, - .fields = &fields: { - const values = std.enums.values(BundleKind); - var fields: [values.len]std.builtin.Type.UnionField = undefined; - for (&fields, values) |*field, kind| { - field.* = .{ - .name = @tagName(kind), - .type = kind.Context(), - .alignment = @alignOf(kind.Context()), - }; - } - break :fields fields; - }, - .decls = &.{}, - }, - }); + pub fn addChunk( + g: *@This(), + abs_path: []const u8, + chunk: bun.bundle_v2.CompileResult, + is_ssr_graph: bool, + ) !void { + const code = chunk.code(); + if (code.len == 0) return; + + g.current_incremental_chunk_len += code.len; + + if (g.owner.dump_dir) |dump_dir| { + const cwd = g.owner.cwd; + var a: bun.PathBuffer = undefined; + var b: [bun.MAX_PATH_BYTES * 2]u8 = undefined; + const rel_path = bun.path.relativeBufZ(&a, cwd, abs_path); + const size = std.mem.replacementSize(u8, rel_path, "../", "_.._/"); + _ = std.mem.replace(u8, rel_path, "../", "_.._/", &b); + const rel_path_escaped = b[0..size]; + dumpBundle(dump_dir, switch (side) { + .client => .client, + .server => if (is_ssr_graph) .ssr else .server, + }, rel_path_escaped, code, true) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + Output.warn("Could not dump bundle: {}", .{err}); + }; + } - inline fn initAnyContext(comptime kind: BundleKind, data: kind.Context()) AnyContext { - return @unionInit(AnyContext, @tagName(kind), data); - } -}; + const gop = try g.bundled_files.getOrPut(default_allocator, abs_path); -/// This will either immediately call `kind.completionFunction()`, or schedule a -/// task to call it when the bundle is ready. The completion function is allowed -/// to use yield. -fn getOrEnqueueBundle( - dev: *DevServer, - resp: *Response, - route: *Route, - comptime kind: BundleKind, - ctx: kind.Context(), -) void { - // const bundler = &dev.bundler; - const bundle = switch (kind) { - .client => &route.client_bundle, - .server => &route.server_bundle, - }; + switch (side) { + .client => { + if (gop.found_existing) { + bun.default_allocator.free(gop.value_ptr.code); + } + gop.value_ptr.* = .{ + .code = chunk.code(), + // .imports = &.{}, + }; + try g.current_incremental_chunk_parts.append(default_allocator, @enumFromInt(gop.index)); + }, + .server => { + // TODO: THIS ALLOCATION STRATEGY SUCKS. IT DOESNT DESERVE TO SHIP + if (!gop.found_existing) { + try g.server_is_ssr.resize(default_allocator, gop.index + 1, false); + try g.server_is_rsc.resize(default_allocator, gop.index + 1, false); + } - switch (bundle.*) { - .unqueued => { - // TODO: use an object pool for this. 
`bun.ObjectPool` needs a refactor before it can be used - const cb = BundleTask.DeferredRequest.newNode(resp, kind.initAnyContext(ctx)); - - const task = bun.new(BundleTask, .{ - .owner = dev, - .route = route, - .kind = kind, - .plugins = null, - .handlers = .{ .first = cb }, - }); - bundle.* = .{ .pending = task }; - dev.bundle_thread.enqueue(task); - }, - .pending => |task| { - const cb = BundleTask.DeferredRequest.newNode(resp, kind.initAnyContext(ctx)); - // This is not a data race, since this list is drained on - // the same thread as this function is called. - task.handlers.prepend(cb); - }, - .failed => |fail| { - fail.sendAsHttpResponse(resp, route, kind); - }, - .value => |*val| { - kind.completionFunction()(route, resp, ctx, val); - }, - } -} + try g.current_incremental_chunk_parts.append(default_allocator, chunk.code()); -const BundleThread = bun.bundle_v2.BundleThread(BundleTask); - -/// A request to bundle something for development. Has one or more pending HTTP requests. -pub const BundleTask = struct { - owner: *DevServer, - route: *Route, - kind: BundleKind, - // env: *bun.DotEnv.Loader, // TODO - plugins: ?*JSC.API.JSBundler.Plugin, - handlers: DeferredRequest.List, - - next: ?*BundleTask = null, - result: BundleV2.Result = .{ .pending = {} }, - - // initialized in the task itself: - concurrent_task: JSC.EventLoopTask = undefined, - bundler: *BundleV2 = undefined, - log: Log = undefined, - - /// There is no function pointer, route, or context on this struct as all of - /// this information is inferable from the associated BundleTask - const DeferredRequest = struct { - /// When cancelled, this is set to null - resp: ?*Response, - /// Only valid if req is non-null - ctx: BundleKind.AnyContext, - - fn newNode(resp: *Response, ctx: BundleKind.AnyContext) *DeferredRequest.List.Node { - const node = bun.new(DeferredRequest.List.Node, .{ - .data = .{ - .resp = resp, - .ctx = ctx, + const bitset = switch (is_ssr_graph) { + true => &g.server_is_ssr, + false => &g.server_is_rsc, + }; + bitset.set(gop.index); }, - }); - resp.onAborted(*DeferredRequest, onCancel, &node.data); - return node; + } } - fn onCancel(node: *DeferredRequest, resp: *Response) void { - node.resp = null; - node.ctx = undefined; - _ = resp; + pub fn ensureStaleBitCapacity(g: *@This()) !void { + try g.stale_files.resize(default_allocator, g.bundled_files.count(), false); } - const List = std.SinglyLinkedList(DeferredRequest); - }; - - pub fn completeOnMainThread(task: *BundleTask) void { - switch (task.kind) { - inline else => |kind| task.completeOnMainThreadWithKind(kind), + pub fn invalidate(g: *@This(), paths: []const []const u8, hashes: []const u32, out_paths: *DualArray([]const u8)) void { + for (paths, hashes) |path, hash| { + const ctx: bun.StringArrayHashMapContext.Prehashed = .{ + .value = hash, + .input = path, + }; + const index = g.bundled_files.getIndexAdapted(path, ctx) orelse + continue; + g.stale_files.set(index); + switch (side) { + .client => out_paths.appendLeft(path), + .server => out_paths.appendRight(path), + } + } } - } - fn completeOnMainThreadWithKind(task: *BundleTask, comptime kind: BundleKind) void { - const route = task.route; - const bundle = switch (kind) { - .client => &route.client_bundle, - .server => &route.server_bundle, + const ChunkKind = enum { + initial_response, + hmr_chunk, }; - assert(bundle.* == .pending); - - if (task.result == .err) { - const fail = Failure.fromLog(&task.log); - fail.printToConsole(route, kind); - task.finishHttpRequestsFailure(&fail); - bundle.* = .{ 
.failed = fail }; - return; + fn reset(g: *@This()) void { + g.current_incremental_chunk_len = 0; + g.current_incremental_chunk_parts.clearRetainingCapacity(); } - if (task.log.hasAny()) { - Output.warn("Warnings {s} for {s}", .{ - @tagName(task.kind), - route.pattern, - }); - task.log.printForLogLevel(Output.errorWriter()) catch {}; - } - - const files = task.result.value.output_files.items; - bun.assert(files.len > 0); - - const dev = route.dev; - if (dev.dump_dir) |dump_dir| { - dumpBundle(dump_dir, route, kind, files) catch |err| { - bun.handleErrorReturnTrace(err, @errorReturnTrace()); - Output.warn("Could not dump bundle: {}", .{err}); + pub fn takeBundle(g: *@This(), kind: ChunkKind) ![]const u8 { + const runtime = switch (kind) { + .initial_response => bun.kit.getHmrRuntime(side), + .hmr_chunk => "({\n", }; - } - - switch (kind) { - .client => { - // Set the capacity to the exact size required to avoid over-allocation - var files_index: bun.CaseInsensitiveASCIIStringArrayHashMapUnmanaged(void) = .{}; - files_index.entries.setCapacity(default_allocator, files.len) catch bun.outOfMemory(); - files_index.entries.len = files.len; - for (files_index.keys(), files) |*index_key, file| { - var dest_path = file.dest_path; - if (bun.strings.hasPrefixComptime(dest_path, "./")) { - dest_path = dest_path[2..]; - } - index_key.* = dest_path; - } - files_index.reIndex(default_allocator) catch bun.outOfMemory(); - bundle.* = .{ .value = .{ - .files = files, - .files_index = files_index, - } }; - }, - .server => { - const entry_point = files[0]; - const code = entry_point.value.buffer.bytes; - - const server_code = c.KitLoadServerCode(dev.server_global, bun.String.createLatin1(code)); - dev.vm.waitForPromise(.{ .internal = server_code.promise }); - - switch (server_code.promise.unwrap(dev.vm.jsc, .mark_handled)) { - .pending => unreachable, // promise is settled - .rejected => |err| { - const fail = Failure.fromJSServerLoad(err, dev.server_global.js()); - fail.printToConsole(task.route, .server); - task.finishHttpRequestsFailure(&fail); - bundle.* = .{ .failed = fail }; - return; + // A small amount of metadata is present at the end of the chunk + // to inform the HMR runtime some crucial entry-point info. The + // upper bound of this can be calculated, but 64kb is given to + // ensure no problems. + // + // TODO: is a higher upper bound required on Windows? + // Alternate solution: calculate the upper bound by summing + // framework paths and then reusing that allocation. 
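Taken together with `addChunk` above, the incremental-chunk pipeline reduces to the following hypothetical TypeScript model. The real prelude comes from `kit.getHmrRuntime` and is not shown in this diff; the client graph tracks indices rather than path strings, and the Zig code preallocates the exact byte count rather than growing a string:

```ts
declare const HMR_RUNTIME_PRELUDE: string; // stands in for kit.getHmrRuntime(side)

class ClientGraph {
  private files = new Map<string, string>(); // abs path -> latest bundled code
  private parts: string[] = [];              // paths queued since the last take
  private partsLen = 0;                      // mirrors current_incremental_chunk_len

  addChunk(absPath: string, code: string): void {
    if (code.length === 0) return;
    this.files.set(absPath, code); // re-bundling a file overwrites its old chunk
    this.parts.push(absPath);
    this.partsLen += code.length;
  }

  takeBundle(kind: "initial_response" | "hmr_chunk", mainEntry: string): string {
    const prelude = kind === "initial_response" ? HMR_RUNTIME_PRELUDE : "({\n";
    const tail = kind === "initial_response"
      ? `}, {\n main: ${JSON.stringify(mainEntry)}\n});`
      : "\n})";
    let out = prelude; // Zig allocates prelude + partsLen + tail in one shot
    for (const path of this.parts) out += this.files.get(path)!;
    return out + tail;
  }
}
```

The server-side graph differs only in that the pending parts hold the chunk slices themselves, since old server bundles never need to be re-materialized.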
+ var end_buf: [65536]u8 = undefined; + const end = end: { + var fbs = std.io.fixedBufferStream(&end_buf); + const w = fbs.writer(); + switch (kind) { + .initial_response => { + w.writeAll("}, {\n main: ") catch unreachable; + const entry = switch (side) { + .server => g.owner.framework.entry_server, + .client => g.owner.framework.entry_client, + } orelse bun.todoPanic(@src(), "non-framework provided entry-point", .{}); + bun.js_printer.writeJSONString( + bun.path.relative(g.owner.cwd, entry), + @TypeOf(w), + w, + .utf8, + ) catch unreachable; + w.writeAll("\n});") catch unreachable; + }, + .hmr_chunk => { + w.writeAll("\n})") catch unreachable; }, - .fulfilled => |v| bun.assert(v == .undefined), - } - - const handler = c.KitGetRequestHandlerFromModule(dev.server_global, server_code.key); - - if (!handler.isCallable(dev.vm.jsc)) { - @panic("TODO: handle not callable"); } + break :end fbs.getWritten(); + }; - bundle.* = .{ .value = .{ - .files = files, - .server_request_callback = handler, - } }; - }, - } - - task.finishHttpRequestsSuccess(kind, &bundle.value); - } - - fn finishHttpRequestsSuccess(task: *BundleTask, comptime kind: BundleKind, bundle: *kind.Bundle()) void { - const func = comptime kind.completionFunction(); - - while (task.handlers.popFirst()) |node| { - defer bun.destroy(node); - if (node.data.resp) |resp| { - func(task.route, resp, @field(node.data.ctx, @tagName(kind)), bundle); + const files = g.bundled_files.values(); + + // This function performs one allocation, right here + var chunk = try std.ArrayListUnmanaged(u8).initCapacity( + default_allocator, + g.current_incremental_chunk_len + runtime.len + end.len, + ); + + chunk.appendSliceAssumeCapacity(runtime); + for (g.current_incremental_chunk_parts.items) |entry| { + chunk.appendSliceAssumeCapacity(switch (side) { + // entry is an index into files + .client => files[@intFromEnum(entry)].code, + // entry is the '[]const u8' itself + .server => entry, + }); } - } - } - - fn finishHttpRequestsFailure(task: *BundleTask, failure: *const Failure) void { - while (task.handlers.popFirst()) |node| { - defer bun.destroy(node); - if (node.data.resp) |resp| { - failure.sendAsHttpResponse(resp, task.route, task.kind); + chunk.appendSliceAssumeCapacity(end); + assert(chunk.capacity == chunk.items.len); + + if (g.owner.dump_dir) |dump_dir| { + const rel_path_escaped = "latest_chunk.js"; + dumpBundle(dump_dir, switch (side) { + .client => .client, + .server => .server, + }, rel_path_escaped, chunk.items, false) catch |err| { + bun.handleErrorReturnTrace(err, @errorReturnTrace()); + Output.warn("Could not dump bundle: {}", .{err}); + }; } - } - } - pub fn configureBundler(task: *BundleTask, bundler: *Bundler, allocator: Allocator) !void { - const dev = task.route.dev; - - bundler.* = try bun.Bundler.init( - allocator, - &task.log, - std.mem.zeroes(bun.Schema.Api.TransformOptions), - null, // TODO: - ); - - const define = bundler.options.define; - bundler.options = dev.bundler.options; - - bundler.options.define = define; - bundler.options.entry_points = (&task.route.entry_point)[0..1]; - bundler.options.log = &task.log; - bundler.options.output_dir = ""; // this disables filesystem outpu; - bundler.options.output_format = .internal_kit_dev; - bundler.options.out_extensions = bun.StringHashMap([]const u8).init(bundler.allocator); - bundler.options.react_fast_refresh = task.kind == .client; - - bundler.options.public_path = switch (task.kind) { - .client => task.route.clientPublicPath(), - .server => task.route.dev.cwd, - }; - 
bundler.options.target = switch (task.kind) { - .client => .browser, - .server => .bun, - }; - bundler.options.entry_naming = switch (task.kind) { - // Always name it "client.{js/css}" so that the server can know - // the entry-point script without waiting on a client bundle. - .client => "client.[ext]", - // For uniformity - .server => "server.[ext]", - }; - bundler.options.tree_shaking = false; - bundler.options.minify_syntax = true; - - bundler.configureLinker(); - try bundler.configureDefines(); - - // The following are from Vite: https://vitejs.dev/guide/env-and-mode - // TODO: MODE, BASE_URL - try bundler.options.define.insert( - allocator, - "import.meta.env.DEV", - Define.Data.initBoolean(true), - ); - try bundler.options.define.insert( - allocator, - "import.meta.env.PROD", - Define.Data.initBoolean(false), - ); - try bundler.options.define.insert( - allocator, - "import.meta.env.SSR", - Define.Data.initBoolean(task.kind == .server), - ); - - bundler.resolver.opts = bundler.options; - bundler.resolver.watcher = dev.bundler.resolver.watcher; - } - - pub fn completeMini(task: *BundleTask, _: *void) void { - task.completeOnMainThread(); - } - - pub fn completeOnBundleThread(task: *BundleTask) void { - task.route.dev.vm.event_loop.enqueueTaskConcurrent(task.concurrent_task.js.from(task, .manual_deinit)); - } -}; - -/// Bundling should be concurrent, deduplicated, and cached. -/// This acts as a sort of "native promise" -fn BundlePromise(T: type) type { - return union(enum) { - unqueued, - pending: *BundleTask, - failed: Failure, - value: T, + return chunk.items; + } }; } @@ -687,6 +839,7 @@ fn BundlePromise(T: type) type { /// In the case a route was not able to fully compile, the `Failure` is stored /// so that a browser refreshing the page can display this failure. const Failure = union(enum) { + zig_error: anyerror, /// Bundler and module resolution use `bun.logger` to report multiple errors at once. bundler: std.ArrayList(bun.logger.Msg), /// Thrown JavaScript exception while loading server code. @@ -711,15 +864,14 @@ const Failure = union(enum) { // TODO: deduplicate the two methods here. that isnt trivial because one has to // style with ansi codes, and the other has to style with HTML. 
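
// A sketch of the deduplication that TODO suggests: one shared body,
// parameterized over a comptime style so the ANSI and HTML callers reuse the
// same formatting logic. `FailureStyle`, the color strings, and this function
// name are assumptions for illustration, not part of the diff itself.
const FailureStyle = enum { ansi, html };

fn writeFailureHeader(route: *const Route, err: anyerror, comptime style: FailureStyle, writer: anytype) !void {
    const open: []const u8 = if (style == .ansi) "\x1b[31m" else "<span class=\"error\">";
    const close: []const u8 = if (style == .ansi) "\x1b[0m" else "</span>";
    try writer.print("{s}Error while bundling '{s}': {s}{s}\n", .{ open, route.pattern, @errorName(err), close });
}
// printToConsole would call this with .ansi; sendAsHttpResponse with .html.
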
- fn printToConsole(fail: *const Failure, route: *const Route, kind: BundleKind) void { + fn printToConsole(fail: *const Failure, route: *const Route) void { defer Output.flush(); Output.prettyErrorln("", .{}); switch (fail.*) { .bundler => |msgs| { - Output.prettyErrorln("Errors while bundling {s}-side for '{s}'", .{ - @tagName(kind), + Output.prettyErrorln("Errors while bundling '{s}'", .{ route.pattern, }); Output.flush(); @@ -730,6 +882,13 @@ const Failure = union(enum) { Output.enable_ansi_colors_stderr, ) catch {}; }, + .zig_error => |err| { + Output.prettyErrorln("Error while bundling '{s}': {s}", .{ + route.pattern, + @errorName(err), + }); + Output.flush(); + }, .server_load => |strong| { Output.prettyErrorln("Server route handler for '{s}' threw while loading", .{ route.pattern, @@ -750,7 +909,7 @@ const Failure = union(enum) { } } - fn sendAsHttpResponse(fail: *const Failure, resp: *Response, route: *const Route, kind: BundleKind) void { + fn sendAsHttpResponse(fail: *const Failure, resp: *Response, route: *const Route) void { resp.writeStatus("500 Internal Server Error"); var buffer: [32768]u8 = undefined; @@ -760,8 +919,7 @@ const Failure = union(enum) { switch (fail.*) { .bundler => |msgs| { - writer.print("Errors while bundling {s}-side for '{s}'\n\n", .{ - @tagName(kind), + writer.print("Errors while bundling '{s}'\n\n", .{ route.pattern, }) catch break :message null; @@ -769,6 +927,9 @@ const Failure = union(enum) { log.printForLogLevelWithEnableAnsiColors(writer, false) catch break :message null; }, + .zig_error => |err| { + writer.print("Error while bundling '{s}': {s}\n", .{ route.pattern, @errorName(err) }) catch break :message null; + }, .server_load => |strong| { writer.print("Server route handler for '{s}' threw while loading\n\n", .{ route.pattern, @@ -795,22 +956,53 @@ const Failure = union(enum) { }; // For debugging, it is helpful to be able to see bundles. -fn dumpBundle(dump_dir: std.fs.Dir, route: *Route, kind: BundleKind, files: []OutputFile) !void { - for (files) |file| { - const name = bun.path.joinAbsString("/", &.{ - route.pattern, - @tagName(kind), - file.dest_path, - }, .auto)[1..]; - var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{}); - defer inner_dir.close(); - - switch (file.value) { - .buffer => |buf| { - try inner_dir.writeFile(.{ .data = buf.bytes, .sub_path = bun.path.basename(name) }); - }, - else => |t| Output.panic("TODO: implement dumping .{s}", .{@tagName(t)}), - } +fn dumpBundle(dump_dir: std.fs.Dir, side: kit.Renderer, rel_path: []const u8, chunk: []const u8, wrap: bool) !void { + const name = bun.path.joinAbsString("/", &.{ + @tagName(side), + rel_path, + }, .auto)[1..]; + var inner_dir = try dump_dir.makeOpenPath(bun.Dirname.dirname(u8, name).?, .{}); + defer inner_dir.close(); + + const file = try inner_dir.createFile(bun.path.basename(name), .{}); + defer file.close(); + + var bufw = std.io.bufferedWriter(file.writer()); + + try bufw.writer().print("// {s} bundled for {s}\n", .{ + bun.fmt.quote(rel_path), + @tagName(side), + }); + try bufw.writer().print("// Bundled at {d}, Bun " ++ bun.Global.package_json_version_with_canary ++ "\n", .{ + std.time.nanoTimestamp(), + }); + + // Wrap in an object to make it valid syntax. Regardless, these files + // are never executable on their own as they contain only a single module. 
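+    // As an assumed example, a wrapped client dump would read roughly:
+    //
+    //   // "routes/index.js" bundled for client
+    //   // Bundled at 1719000000000000000, Bun 1.1.0-canary
+    //   ({
+    //     ...the single module's code...
+    //   });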
+
+    if (wrap)
+        try bufw.writer().writeAll("({\n");
+
+    try bufw.writer().writeAll(chunk);
+
+    if (wrap)
+        try bufw.writer().writeAll("});\n");
+
+    try bufw.flush();
+}
+
+pub fn isFileStale(dev: *DevServer, path: []const u8, side: kit.Renderer) bool {
+    switch (side) {
+        inline else => |side_comptime| {
+            const g = switch (side_comptime) {
+                .client => &dev.client_graph,
+                .server => &dev.server_graph,
+                .ssr => &dev.server_graph,
+            };
+            const index = g.bundled_files.getIndex(path) orelse
+                return true; // non-existent files are considered stale
+            return g.stale_files.isSet(index);
+        },
     }
 }
 
@@ -843,18 +1035,23 @@ const DevWebSocket = struct {
     dev: *DevServer,
 
     pub fn onOpen(dw: *DevWebSocket, ws: AnyWebSocket) void {
-        _ = ws.send("bun!", .binary, false, false);
-        std.debug.print("open {*} {}\n", .{ dw, ws });
+        _ = dw; // autofix
+        _ = ws.send("bun!", .text, false, true);
+        _ = ws.subscribe("TODO");
     }
 
     pub fn onMessage(dw: *DevWebSocket, ws: AnyWebSocket, msg: []const u8, opcode: uws.Opcode) void {
-        std.debug.print("message {*} {} {} '{s}'\n", .{ dw, ws, opcode, msg });
+        _ = dw; // autofix
+        _ = ws; // autofix
+        _ = msg; // autofix
+        _ = opcode; // autofix
     }
 
     pub fn onClose(dw: *DevWebSocket, ws: AnyWebSocket, exit_code: i32, message: []const u8) void {
+        _ = ws; // autofix
+        _ = exit_code; // autofix
+        _ = message; // autofix
         defer bun.destroy(dw);
-
-        std.debug.print("close {*} {} {} '{s}'\n", .{ dw, ws, exit_code, message });
     }
 };
 
@@ -882,11 +1079,101 @@ pub const c = struct {
     extern fn KitGetRequestHandlerFromModule(global: *DevGlobalObject, module: *JSC.JSString) JSValue;
 };
 
-pub fn reload(dev: *DevServer) void {
-    // TODO: given no arguments, this method is absolutely useless. The watcher
-    // must be augmented with more information.
-    _ = dev;
-    Output.warn("TODO: initiate hot reload", .{});
+pub fn reload(dev: *DevServer, reload_task: *const HotReloadTask) void {
+    dev.reloadWrap(reload_task) catch bun.todoPanic(@src(), "handle hot-reloading error", .{});
+}
+
+pub fn reloadWrap(dev: *DevServer, reload_task: *const HotReloadTask) !void {
+    const sfb = default_allocator;
+
+    const changed_file_paths = reload_task.paths[0..reload_task.count];
+    const changed_hashes = reload_task.hashes[0..reload_task.count];
+
+    defer for (changed_file_paths) |path| default_allocator.free(path);
+
+    var files_to_bundle = try DualArray([]const u8).initCapacity(sfb, changed_file_paths.len * 2);
+    defer files_to_bundle.deinit(sfb);
+    inline for (.{ &dev.server_graph, &dev.client_graph }) |g| {
+        g.invalidate(changed_file_paths, changed_hashes, &files_to_bundle);
+    }
+
+    bun.todoPanic(@src(), "rewire hot-bundling code", .{});
+
+    // const route = &dev.routes[0];
+
+    // const bundle_task = bun.new(BundleTask, .{
+    //     .owner = dev,
+    //     .route = route,
+    //     .kind = .client,
+    //     .plugins = null,
+    //     .handlers = .{ .first = null },
+    // });
+    // assert(route.client_bundle != .pending); // todo: rapid reloads
+    // route.client_bundle = .{ .pending = bundle_task };
+    // dev.bundle_thread.enqueue(bundle_task);
+}
+
+pub fn bustDirCache(dev: *DevServer, path: []const u8) bool {
+    const a = dev.server_bundler.resolver.bustDirCache(path);
+    const b = dev.client_bundler.resolver.bustDirCache(path);
+    return a or b;
+}
+
+pub fn getLoaders(dev: *DevServer) *bun.options.Loader.HashTable {
+    // The watcher needs to know what loader to use for a file; therefore,
+    // the server and client options must use the same loader set.
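+    // (As an assumed example of the failure mode: if ".css" resolved to
+    // different loaders on the two sides, the watcher could treat a changed
+    // file as stale for one module graph but not the other.)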
+    return &dev.server_bundler.options.loaders;
+}
+
+/// A data structure to represent two arrays that share a known upper bound.
+/// The "left" array starts at the allocation start, and the "right" array
+/// starts at the allocation end.
+///
+/// An example use-case is a list of files that must be categorized into
+/// server/client, where the total number of files is known.
+pub fn DualArray(T: type) type {
+    return struct {
+        items: []T,
+        left_end: u32,
+        right_start: u32,
+
+        pub fn initCapacity(allocator: Allocator, cap: usize) !@This() {
+            return .{
+                .items = try allocator.alloc(T, cap),
+                .left_end = 0,
+                .right_start = @intCast(cap),
+            };
+        }
+
+        pub fn deinit(a: @This(), allocator: Allocator) void {
+            allocator.free(a.items);
+        }
+
+        fn hasAny(a: @This()) bool {
+            return a.left_end != 0 or a.right_start != a.items.len;
+        }
+
+        pub fn left(a: @This()) []T {
+            return a.items[0..a.left_end];
+        }
+
+        pub fn right(a: @This()) []T {
+            return a.items[a.right_start..];
+        }
+
+        pub fn appendLeft(a: *@This(), item: T) void {
+            assert(a.left_end < a.right_start);
+            a.items[a.left_end] = item;
+            a.left_end += 1;
+        }
+
+        pub fn appendRight(a: *@This(), item: T) void {
+            assert(a.right_start > a.left_end);
+            a.right_start -= 1;
+            a.items[a.right_start] = item;
+        }
+    };
+}
 
 const std = @import("std");
@@ -896,6 +1183,8 @@ const bun = @import("root").bun;
 const Environment = bun.Environment;
 const assert = bun.assert;
 
+const kit = bun.kit;
+
 const Log = bun.logger.Log;
 const Bundler = bun.bundler.Bundler;
@@ -903,7 +1192,6 @@ const BundleV2 = bun.bundle_v2.BundleV2;
 const Define = bun.options.Define;
 const OutputFile = bun.options.OutputFile;
 
-// TODO: consider if using system output is not fit
 const Output = bun.Output;
 
 const uws = bun.uws;
@@ -923,3 +1211,5 @@ const JSInternalPromise = JSC.JSInternalPromise;
 
 pub const HotReloader = JSC.NewHotReloader(DevServer, JSC.EventLoop, false);
 pub const HotReloadTask = HotReloader.HotReloadTask;
+
+const ThreadlocalArena = @import("../mimalloc_arena.zig").Arena;
diff --git a/src/kit/KitDevGlobalObject.cpp b/src/kit/KitDevGlobalObject.cpp
index 7e3aee09777c1..8716fa3790dfd 100644
--- a/src/kit/KitDevGlobalObject.cpp
+++ b/src/kit/KitDevGlobalObject.cpp
@@ -1,36 +1,50 @@
 #include "KitDevGlobalObject.h"
 #include "JSNextTickQueue.h"
 #include "JavaScriptCore/GlobalObjectMethodTable.h"
+#include "JavaScriptCore/JSInternalPromise.h"
 #include "headers-handwritten.h"
 
 namespace Kit {
 
-#define INHERIT_HOOK_METHOD(name) \
-  Zig::GlobalObject::s_globalObjectMethodTable.name
+JSC::JSInternalPromise* moduleLoaderImportModule(
+    JSC::JSGlobalObject* jsGlobalObject,
+    JSC::JSModuleLoader*,
+    JSC::JSString* moduleNameValue,
+    JSC::JSValue parameters,
+    const JSC::SourceOrigin& sourceOrigin)
+{
+  // TODO: forward this to the runtime
+  JSC::VM& vm = jsGlobalObject->vm();
+  auto err = JSC::createTypeError(jsGlobalObject, WTF::makeString("Dynamic import should have been replaced with a hook into the module runtime"_s));
+  auto* promise = JSC::JSInternalPromise::create(vm, jsGlobalObject->internalPromiseStructure());
+  promise->reject(jsGlobalObject, err);
+  return promise;
+}
+
+#define INHERIT_HOOK_METHOD(name) Zig::GlobalObject::s_globalObjectMethodTable.
name -const JSC::GlobalObjectMethodTable DevGlobalObject::s_globalObjectMethodTable = - { - INHERIT_HOOK_METHOD(supportsRichSourceInfo), - INHERIT_HOOK_METHOD(shouldInterruptScript), - INHERIT_HOOK_METHOD(javaScriptRuntimeFlags), - INHERIT_HOOK_METHOD(queueMicrotaskToEventLoop), - INHERIT_HOOK_METHOD(shouldInterruptScriptBeforeTimeout), - INHERIT_HOOK_METHOD(moduleLoaderImportModule), - INHERIT_HOOK_METHOD(moduleLoaderResolve), - INHERIT_HOOK_METHOD(moduleLoaderFetch), - INHERIT_HOOK_METHOD(moduleLoaderCreateImportMetaProperties), - INHERIT_HOOK_METHOD(moduleLoaderEvaluate), - INHERIT_HOOK_METHOD(promiseRejectionTracker), - INHERIT_HOOK_METHOD(reportUncaughtExceptionAtEventLoop), - INHERIT_HOOK_METHOD(currentScriptExecutionOwner), - INHERIT_HOOK_METHOD(scriptExecutionStatus), - INHERIT_HOOK_METHOD(reportViolationForUnsafeEval), - INHERIT_HOOK_METHOD(defaultLanguage), - INHERIT_HOOK_METHOD(compileStreaming), - INHERIT_HOOK_METHOD(instantiateStreaming), - INHERIT_HOOK_METHOD(deriveShadowRealmGlobalObject), - INHERIT_HOOK_METHOD(codeForEval), - INHERIT_HOOK_METHOD(canCompileStrings), +const JSC::GlobalObjectMethodTable DevGlobalObject::s_globalObjectMethodTable = { + INHERIT_HOOK_METHOD(supportsRichSourceInfo), + INHERIT_HOOK_METHOD(shouldInterruptScript), + INHERIT_HOOK_METHOD(javaScriptRuntimeFlags), + INHERIT_HOOK_METHOD(queueMicrotaskToEventLoop), + INHERIT_HOOK_METHOD(shouldInterruptScriptBeforeTimeout), + moduleLoaderImportModule, + INHERIT_HOOK_METHOD(moduleLoaderResolve), + INHERIT_HOOK_METHOD(moduleLoaderFetch), + INHERIT_HOOK_METHOD(moduleLoaderCreateImportMetaProperties), + INHERIT_HOOK_METHOD(moduleLoaderEvaluate), + INHERIT_HOOK_METHOD(promiseRejectionTracker), + INHERIT_HOOK_METHOD(reportUncaughtExceptionAtEventLoop), + INHERIT_HOOK_METHOD(currentScriptExecutionOwner), + INHERIT_HOOK_METHOD(scriptExecutionStatus), + INHERIT_HOOK_METHOD(reportViolationForUnsafeEval), + INHERIT_HOOK_METHOD(defaultLanguage), + INHERIT_HOOK_METHOD(compileStreaming), + INHERIT_HOOK_METHOD(instantiateStreaming), + INHERIT_HOOK_METHOD(deriveShadowRealmGlobalObject), + INHERIT_HOOK_METHOD(codeForEval), + INHERIT_HOOK_METHOD(canCompileStrings), }; DevGlobalObject * diff --git a/src/kit/KitSourceProvider.cpp b/src/kit/KitSourceProvider.cpp index e28e1c4fd175e..4127efaa9ae65 100644 --- a/src/kit/KitSourceProvider.cpp +++ b/src/kit/KitSourceProvider.cpp @@ -15,7 +15,7 @@ namespace Kit { extern "C" LoadServerCodeResult KitLoadServerCode(DevGlobalObject* global, BunString source) { - String string = "kit://server/0/index.js"_s; + String string = "kit://server"_s; JSC::SourceOrigin origin = JSC::SourceOrigin(WTF::URL(string)); JSC::SourceCode sourceCode = JSC::SourceCode(KitSourceProvider::create( source.toWTFString(), diff --git a/src/kit/bun-framework-rsc/client.tsx b/src/kit/bun-framework-rsc/client.tsx new file mode 100644 index 0000000000000..30277f62a57ed --- /dev/null +++ b/src/kit/bun-framework-rsc/client.tsx @@ -0,0 +1 @@ +console.log('incredible'); \ No newline at end of file diff --git a/src/kit/bun-framework-rsc/server.tsx b/src/kit/bun-framework-rsc/server.tsx new file mode 100644 index 0000000000000..7c713dac40dab --- /dev/null +++ b/src/kit/bun-framework-rsc/server.tsx @@ -0,0 +1,33 @@ +/// +import type { Kit } from "bun"; +import React from "react"; +import { PassThrough } from "node:stream"; +// @ts-ignore +import { renderToPipeableStream } from "react-server-dom-webpack/server"; +import { renderToHtml } from './ssr' with { bunKitGraph: 'ssr' }; +import { serverManifest } from 
'bun:kit/server';
+
+export default async function (request: Request, route: any, meta: Kit.RouteMetadata): Promise<Response> {
+  const Route = route.default;
+  const page = (
+    <html>
+      <head>
+        <title>Bun + React Server Components</title>
+        {meta.styles.map(url => <link rel="stylesheet" href={url} />)}
+      </head>
+      <body>
+        <Route />
+        {meta.scripts.map(url =>