diff --git a/docs/docs/api/appkit/Enumeration.ResourceType.md b/docs/docs/api/appkit/Enumeration.ResourceType.md
index 77b9d29f..2dc2e2b3 100644
--- a/docs/docs/api/appkit/Enumeration.ResourceType.md
+++ b/docs/docs/api/appkit/Enumeration.ResourceType.md
@@ -44,6 +44,14 @@ JOB: "job";
***
+### POSTGRES
+
+```ts
+POSTGRES: "postgres";
+```
+
+***
+
### SECRET
```ts
diff --git a/docs/docs/api/appkit/Interface.ResourceFieldEntry.md b/docs/docs/api/appkit/Interface.ResourceFieldEntry.md
index 198334e4..2874887b 100644
--- a/docs/docs/api/appkit/Interface.ResourceFieldEntry.md
+++ b/docs/docs/api/appkit/Interface.ResourceFieldEntry.md
@@ -5,6 +5,16 @@ Single-value types use one key (e.g. id); multi-value types (database, secret) u
## Properties
+### bundleIgnore?
+
+```ts
+optional bundleIgnore: boolean;
+```
+
+When true, this field is excluded from Databricks bundle configuration (databricks.yml) generation.
+
+***
+
### description?
```ts
@@ -15,10 +25,50 @@ Human-readable description for this field
***
-### env
+### env?
```ts
-env: string;
+optional env: string;
```
Environment variable name for this field
+
+***
+
+### examples?
+
+```ts
+optional examples: string[];
+```
+
+Example values showing the expected format for this field
+
+***
+
+### localOnly?
+
+```ts
+optional localOnly: boolean;
+```
+
+When true, this field is only generated for local .env files. The Databricks Apps platform auto-injects it at deploy time.
+
+***
+
+### resolve?
+
+```ts
+optional resolve: string;
+```
+
+Named resolver prefixed by resource type (e.g., 'postgres:host'). The CLI resolves this value during the init prompt flow.
+
+***
+
+### value?
+
+```ts
+optional value: string;
+```
+
+Static value for this field. Used when no prompted or resolved value exists.
diff --git a/docs/docs/api/appkit/TypeAlias.ResourcePermission.md b/docs/docs/api/appkit/TypeAlias.ResourcePermission.md
index 76bc8723..484c9f1d 100644
--- a/docs/docs/api/appkit/TypeAlias.ResourcePermission.md
+++ b/docs/docs/api/appkit/TypeAlias.ResourcePermission.md
@@ -11,6 +11,7 @@ type ResourcePermission =
| UcFunctionPermission
| UcConnectionPermission
| DatabasePermission
+ | PostgresPermission
| GenieSpacePermission
| ExperimentPermission
| AppPermission;
diff --git a/docs/docs/plugins/assets/lakebase-setup/step-1.png b/docs/docs/plugins/assets/lakebase-setup/step-1.png
deleted file mode 100644
index 168ab7bd..00000000
Binary files a/docs/docs/plugins/assets/lakebase-setup/step-1.png and /dev/null differ
diff --git a/docs/docs/plugins/assets/lakebase-setup/step-2.png b/docs/docs/plugins/assets/lakebase-setup/step-2.png
deleted file mode 100644
index 5d99b912..00000000
Binary files a/docs/docs/plugins/assets/lakebase-setup/step-2.png and /dev/null differ
diff --git a/docs/docs/plugins/assets/lakebase-setup/step-4.png b/docs/docs/plugins/assets/lakebase-setup/step-4.png
deleted file mode 100644
index 11b853ef..00000000
Binary files a/docs/docs/plugins/assets/lakebase-setup/step-4.png and /dev/null differ
diff --git a/docs/docs/plugins/assets/lakebase-setup/step-5.png b/docs/docs/plugins/assets/lakebase-setup/step-5.png
deleted file mode 100644
index 3fc65dce..00000000
Binary files a/docs/docs/plugins/assets/lakebase-setup/step-5.png and /dev/null differ
diff --git a/docs/docs/plugins/assets/lakebase-setup/step-6.png b/docs/docs/plugins/assets/lakebase-setup/step-6.png
deleted file mode 100644
index bb13e2bd..00000000
Binary files a/docs/docs/plugins/assets/lakebase-setup/step-6.png and /dev/null differ
diff --git a/docs/docs/plugins/lakebase.md b/docs/docs/plugins/lakebase.md
index 95c70647..c0f12869 100644
--- a/docs/docs/plugins/lakebase.md
+++ b/docs/docs/plugins/lakebase.md
@@ -4,10 +4,6 @@ sidebar_position: 4
# Lakebase plugin
-:::info
-Currently, the Lakebase plugin currently requires a one-time manual setup to connect your Databricks App with your Lakebase database. An automated setup process is planned for an upcoming future release.
-:::
-
Provides a PostgreSQL connection pool for Databricks Lakebase Autoscaling with automatic OAuth token refresh.
**Key features:**
@@ -15,87 +11,23 @@ Provides a PostgreSQL connection pool for Databricks Lakebase Autoscaling with a
- Automatic OAuth token refresh (1-hour tokens, 2-minute refresh buffer)
- Token caching to minimize API calls
- Built-in OpenTelemetry instrumentation (query duration, pool connections, token refresh)
+- AppKit logger configured by default for query and connection events
-## Setting up Lakebase
-
-Before using the plugin, you need to connect your Databricks App's service principal to your Lakebase database.
-
-### 1. Find your app's service principal
-
-Create a Databricks App from the UI (`Compute > Apps > Create App > Create a custom app`). Navigate to the **Environment** tab and note the `DATABRICKS_CLIENT_ID` value — this is the service principal that will connect to your Lakebase database.
-
-
-
-### 2. Find your Project ID and Branch ID
+## Getting started with the Lakebase
-Create a new Lakebase Postgres Autoscaling project. Navigate to your Lakebase project's branch details and switch to the **Compute** tab. Note the **Project ID** and **Branch ID** from the URL.
+The easiest way to get started with the Lakebase plugin is to use the Databricks CLI to create a new Databricks app with AppKit and the Lakebase plugin installed.
-
+### Prerequisites
-### 3. Find your endpoint
-
-Use the Databricks CLI to list endpoints for the branch. Note the `name` field from the output — this is your `LAKEBASE_ENDPOINT` value.
-
-```bash
-databricks postgres list-endpoints projects/{project-id}/branches/{branch-id}
-```
-
-Example output:
-
-```json
-[
- {
- "create_time": "2026-02-19T12:13:02Z",
- "name": "projects/{project-id}/branches/{branch-id}/endpoints/primary"
- }
-]
-```
+- [Node.js](https://nodejs.org) v22+ environment with `npm`
+- Databricks CLI (v0.287.0 or higher): install and configure it according to the [official tutorial](https://docs.databricks.com/aws/en/dev-tools/cli/tutorial).
+- A new Databricks app with AppKit installed. See [Bootstrap a new Databricks app](../index.md#quick-start-options) for more details.
-### 4. Get connection parameters
+### Steps
-Click the **Connect** button on your Lakebase branch and copy the `PGHOST` and `PGDATABASE` values for later.
-
-
-
-### 5. Grant access to the service principal
-
-Navigate to the **SQL Editor** tab on your Lakebase branch. Run the following SQL against the `databricks_postgres` database, replacing the service principal ID in the `DECLARE` block with the `DATABRICKS_CLIENT_ID` value from step 1:
-
-```sql
-CREATE EXTENSION IF NOT EXISTS databricks_auth;
-
-DO $$
-DECLARE
- sp TEXT := 'your-service-principal-id'; -- Replace with DATABRICKS_CLIENT_ID from Step 1
-BEGIN
- -- Create service principal role
- PERFORM databricks_create_role(sp, 'SERVICE_PRINCIPAL');
-
- -- Connection and schema access
- EXECUTE format('GRANT CONNECT ON DATABASE "databricks_postgres" TO %I', sp);
- EXECUTE format('GRANT ALL ON SCHEMA public TO %I', sp);
-
- -- Privileges on existing objects
- EXECUTE format('GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO %I', sp);
- EXECUTE format('GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO %I', sp);
- EXECUTE format('GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO %I', sp);
- EXECUTE format('GRANT ALL PRIVILEGES ON ALL PROCEDURES IN SCHEMA public TO %I', sp);
-
- -- Default privileges on future objects you create
- EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO %I', sp);
- EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO %I', sp);
- EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON FUNCTIONS TO %I', sp);
- EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON ROUTINES TO %I', sp);
-END $$;
-```
-
-
-
-### 6. Verify the role
-
-Navigate to the **Roles & Databases** tab and confirm the role is visible. You may need to fully refresh the page.
-
-
+1. First, create a new Lakebase Postgres Autoscaling project according to the [Get started documentation](https://docs.databricks.com/aws/en/oltp/projects/get-started).
+1. To add the Lakebase plugin to your project, run the `databricks apps init` command and interactively select the **Lakebase** plugin. The CLI will guide you through picking a Lakebase project, branch, and database.
+ - When asked, select **Yes** to deploy the app to Databricks Apps right after its creation.
## Basic usage
@@ -107,33 +39,6 @@ await createApp({
});
```
-## Environment variables
-
-The required environment variables:
-
-| Variable | Description |
-|---|---|
-| `PGHOST` | Lakebase host |
-| `PGDATABASE` | Database name |
-| `LAKEBASE_ENDPOINT` | Endpoint resource path (e.g. `projects/.../branches/.../endpoints/...`) |
-| `PGSSLMODE` | TLS mode — set to `require` |
-
-Ensure that those environment variables are set both for local development (`.env` file) and for deployment (`app.yaml` file):
-
-```yaml
-env:
- - name: LAKEBASE_ENDPOINT
- value: projects/{project-id}/branches/{branch-id}/endpoints/primary
- - name: PGHOST
- value: {your-lakebase-host}
- - name: PGDATABASE
- value: databricks_postgres
- - name: PGSSLMODE
- value: require
-```
-
-For the full configuration reference (SSL, pool size, timeouts, logging, ORM examples), see the [`@databricks/lakebase` README](https://github.com/databricks/appkit/blob/main/packages/lakebase/README.md).
-
## Accessing the pool
After initialization, access Lakebase through the `AppKit.lakebase` object:
@@ -143,9 +48,17 @@ const AppKit = await createApp({
plugins: [server(), lakebase()],
});
-// Direct query (parameterized)
+await AppKit.lakebase.query(`CREATE SCHEMA IF NOT EXISTS app`);
+
+await AppKit.lakebase.query(`CREATE TABLE IF NOT EXISTS app.orders (
+ id SERIAL PRIMARY KEY,
+ user_id VARCHAR(255) NOT NULL,
+ amount DECIMAL(10, 2) NOT NULL,
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+)`);
+
const result = await AppKit.lakebase.query(
- "SELECT * FROM orders WHERE user_id = $1",
+ "SELECT * FROM app.orders WHERE user_id = $1",
[userId],
);
@@ -157,7 +70,32 @@ const ormConfig = AppKit.lakebase.getOrmConfig(); // { host, port, database, ..
const pgConfig = AppKit.lakebase.getPgConfig(); // pg.PoolConfig
```
-## Configuration options
+## Configuration
+
+### Environment variables
+
+The required environment variables are:
+
+| Variable | Description |
+|---|---|
+| `LAKEBASE_ENDPOINT` | Endpoint resource path (e.g. `projects/.../branches/.../endpoints/...`) |
+| `PGHOST` | Lakebase host (auto-injected in production by the `postgres` Databricks Apps resource) |
+| `PGDATABASE` | Database name (auto-injected in production by the `postgres` Databricks Apps resource) |
+| `PGSSLMODE` | TLS mode — set to `require` (auto-injected in production by the `postgres` Databricks Apps resource) |
+
+When deployed to Databricks Apps with a `postgres` database resource configured, `PGHOST`, `PGDATABASE`, `PGSSLMODE`, `PGUSER`, `PGPORT`, and `PGAPPNAME` are automatically injected by the platform. Only `LAKEBASE_ENDPOINT` must be set explicitly:
+
+```yaml
+env:
+ - name: LAKEBASE_ENDPOINT
+ valueFrom: postgres
+```
+
+For local development, the `.env` file is automatically generated by `databricks apps init` with the correct values for your Lakebase project.
+
+For the full configuration reference (SSL, pool size, timeouts, logging, ORM examples), see the [`@databricks/lakebase` README](https://github.com/databricks/appkit/blob/main/packages/lakebase/README.md).
+
+### Pool configuration
Pass a `pool` object to override any defaults:
@@ -174,3 +112,75 @@ await createApp({
],
});
```
+
+## Database permissions
+
+When you create the app with the Lakebase resource using the [Getting started](#getting-started-with-the-lakebase) guide, the Service Principal is automatically granted `CAN_CONNECT_AND_CREATE` permission on the `postgres` resource. This lets the Service Principal connect to the database and create new objects, but **not access any existing schemas or tables.**
+
+### Local development
+
+To develop locally against a deployed Lakebase database:
+
+1. **Deploy the app first.** The Service Principal creates the database schema and tables on first deploy. Apps generated from `databricks apps init` handle this automatically — they check if tables exist on startup and skip creation if they do.
+
+2. **Grant `databricks_superuser` via the Lakebase UI:**
+ 1. Open the Lakebase Autoscaling UI and navigate to your project's **Branch Overview** page.
+ 2. Click **Add role** (or **Edit role** if your OAuth role already exists).
+ 3. Select your Databricks identity as the principal and check the **`databricks_superuser`** system role.
+
+3. **Run locally** — your Databricks user identity (email) is used for OAuth authentication. The `databricks_superuser` role gives full **DML access** (read/write data) but **not DDL** (creating schemas or tables) — that's why deploying first matters (see note below).
+
+For other users, use the same **Add role** flow in the Lakebase UI to create an OAuth role with `databricks_superuser` for each user.
+
+:::tip
+[Postgres password authentication](https://docs.databricks.com/aws/en/oltp/projects/authentication#overview) is a simpler alternative that avoids OAuth role permission complexity. However, it requires you to set up a password for the user in the **Branch Overview** page in the Lakebase Autoscaling UI.
+:::
+
+:::info[Why deploy first?]
+When the app is deployed, the Service Principal creates schemas and tables and becomes their owner. A `databricks_superuser` has full **DML access** (SELECT, INSERT, UPDATE, DELETE) to these objects, but **cannot run DDL** (CREATE SCHEMA, CREATE TABLE) on schemas owned by the Service Principal. Deploying first ensures all objects exist before local development begins.
+:::
+
+### Fine-grained permissions
+
+For most use cases, `databricks_superuser` is sufficient. If you need schema-level grants instead, refer to the official documentation:
+
+- [Manage database permissions](https://docs.databricks.com/aws/en/oltp/projects/manage-roles-permissions)
+- [Postgres roles](https://docs.databricks.com/aws/en/oltp/projects/postgres-roles)
+
+
+#### SQL script for fine-grained grants
+
+Deploy and run the app at least once before executing these grants so the Service Principal initializes the database schema first.
+
+Replace `subject` with the user email and `schema` with your schema name:
+
+```sql
+CREATE EXTENSION IF NOT EXISTS databricks_auth;
+
+DO $$
+DECLARE
+ subject TEXT := 'your-subject'; -- User email like name@databricks.com
+ schema TEXT := 'your_schema'; -- Replace 'your_schema' with your schema name
+BEGIN
+ -- Create OAuth role for the Databricks identity
+ PERFORM databricks_create_role(subject, 'USER');
+
+ -- Connection and schema access
+ EXECUTE format('GRANT CONNECT ON DATABASE "databricks_postgres" TO %I', subject);
+ EXECUTE format('GRANT ALL ON SCHEMA %s TO %I', schema, subject);
+
+ -- Privileges on existing objects
+ EXECUTE format('GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s TO %I', schema, subject);
+ EXECUTE format('GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA %s TO %I', schema, subject);
+ EXECUTE format('GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA %s TO %I', schema, subject);
+ EXECUTE format('GRANT ALL PRIVILEGES ON ALL PROCEDURES IN SCHEMA %s TO %I', schema, subject);
+
+ -- Default privileges on future objects
+ EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT ALL ON TABLES TO %I', schema, subject);
+ EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT ALL ON SEQUENCES TO %I', schema, subject);
+ EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT ALL ON FUNCTIONS TO %I', schema, subject);
+ EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %s GRANT ALL ON ROUTINES TO %I', schema, subject);
+END $$;
+```
+
+
diff --git a/docs/static/schemas/plugin-manifest.schema.json b/docs/static/schemas/plugin-manifest.schema.json
index e4c70ed3..cae2e688 100644
--- a/docs/static/schemas/plugin-manifest.schema.json
+++ b/docs/static/schemas/plugin-manifest.schema.json
@@ -111,6 +111,7 @@
"uc_function",
"uc_connection",
"database",
+ "postgres",
"genie_space",
"experiment",
"app"
@@ -162,6 +163,11 @@
"enum": ["CAN_CONNECT_AND_CREATE"],
"description": "Permission for database resources"
},
+ "postgresPermission": {
+ "type": "string",
+ "enum": ["CAN_CONNECT_AND_CREATE"],
+ "description": "Permission for Postgres resources"
+ },
"genieSpacePermission": {
"type": "string",
"enum": ["CAN_VIEW", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"],
@@ -179,7 +185,6 @@
},
"resourceFieldEntry": {
"type": "object",
- "required": ["env"],
"properties": {
"env": {
"type": "string",
@@ -190,20 +195,37 @@
"description": {
"type": "string",
"description": "Human-readable description for this field"
+ },
+ "bundleIgnore": {
+ "type": "boolean",
+ "default": false,
+ "description": "When true, this field is excluded from Databricks bundle configuration (databricks.yml) generation."
+ },
+ "examples": {
+ "type": "array",
+ "items": { "type": "string" },
+ "description": "Example values showing the expected format for this field"
+ },
+ "localOnly": {
+ "type": "boolean",
+ "default": false,
+ "description": "When true, this field is only generated for local .env files. The Databricks Apps platform auto-injects it at deploy time."
+ },
+ "value": {
+ "type": "string",
+ "description": "Static value for this field. Used when no prompted or resolved value exists."
+ },
+ "resolve": {
+ "type": "string",
+ "pattern": "^[a-z_]+:[a-zA-Z]+$",
+ "description": "Named resolver prefixed by resource type (e.g., 'postgres:host'). The CLI resolves this value during the init prompt flow."
}
},
"additionalProperties": false
},
"resourceRequirement": {
"type": "object",
- "required": [
- "type",
- "alias",
- "resourceKey",
- "description",
- "permission",
- "fields"
- ],
+ "required": ["type", "alias", "resourceKey", "description", "permission"],
"properties": {
"type": {
"$ref": "#/$defs/resourceType"
@@ -337,6 +359,17 @@
}
}
},
+ {
+ "if": {
+ "properties": { "type": { "const": "postgres" } },
+ "required": ["type"]
+ },
+ "then": {
+ "properties": {
+ "permission": { "$ref": "#/$defs/postgresPermission" }
+ }
+ }
+ },
{
"if": {
"properties": { "type": { "const": "genie_space" } },
diff --git a/packages/appkit/src/cache/storage/defaults.ts b/packages/appkit/src/cache/storage/defaults.ts
index 2db682a8..16b0b456 100644
--- a/packages/appkit/src/cache/storage/defaults.ts
+++ b/packages/appkit/src/cache/storage/defaults.ts
@@ -6,6 +6,8 @@ export const inMemoryStorageDefaults = {
/** Default configuration for Lakebase storage */
export const lakebaseStorageDefaults = {
+ /** Schema name for the cache tables */
+ schemaName: "appkit",
/** Table name for the cache */
tableName: "appkit_cache_entries",
/** Maximum number of bytes in the cache */
diff --git a/packages/appkit/src/cache/storage/persistent.ts b/packages/appkit/src/cache/storage/persistent.ts
index d9affd73..fb809846 100644
--- a/packages/appkit/src/cache/storage/persistent.ts
+++ b/packages/appkit/src/cache/storage/persistent.ts
@@ -24,7 +24,9 @@ const logger = createLogger("cache:persistent");
*/
export class PersistentStorage implements CacheStorage {
private readonly pool: pg.Pool;
+ private readonly schemaName: string;
private readonly tableName: string;
+ private readonly qualifiedTableName: string;
private readonly maxBytes: number;
private readonly maxEntryBytes: number;
private readonly evictionBatchSize: number;
@@ -40,7 +42,9 @@ export class PersistentStorage implements CacheStorage {
this.evictionCheckProbability =
config.evictionCheckProbability ??
lakebaseStorageDefaults.evictionCheckProbability;
- this.tableName = lakebaseStorageDefaults.tableName; // hardcoded, safe for now
+ this.schemaName = lakebaseStorageDefaults.schemaName;
+ this.tableName = lakebaseStorageDefaults.tableName;
+ this.qualifiedTableName = `${this.schemaName}.${this.tableName}`;
this.initialized = false;
}
@@ -70,9 +74,10 @@ export class PersistentStorage implements CacheStorage {
const result = await this.pool.query<{
value: Buffer;
expiry: string;
- }>(`SELECT value, expiry FROM ${this.tableName} WHERE key_hash = $1`, [
- keyHash,
- ]);
+ }>(
+ `SELECT value, expiry FROM ${this.qualifiedTableName} WHERE key_hash = $1`,
+ [keyHash],
+ );
if (result.rows.length === 0) return null;
@@ -81,7 +86,7 @@ export class PersistentStorage implements CacheStorage {
// fire-and-forget update
this.pool
.query(
- `UPDATE ${this.tableName} SET last_accessed = NOW() WHERE key_hash = $1`,
+ `UPDATE ${this.qualifiedTableName} SET last_accessed = NOW() WHERE key_hash = $1`,
[keyHash],
)
.catch(() => {
@@ -125,7 +130,7 @@ export class PersistentStorage implements CacheStorage {
}
await this.pool.query(
- `INSERT INTO ${this.tableName} (key_hash, key, value, byte_size, expiry, created_at, last_accessed)
+ `INSERT INTO ${this.qualifiedTableName} (key_hash, key, value, byte_size, expiry, created_at, last_accessed)
VALUES ($1, $2, $3, $4, $5, NOW(), NOW())
ON CONFLICT (key_hash)
DO UPDATE SET value = $3, byte_size = $4, expiry = $5, last_accessed = NOW()
@@ -142,15 +147,16 @@ export class PersistentStorage implements CacheStorage {
async delete(key: string): Promise {
await this.ensureInitialized();
const keyHash = this.hashKey(key);
- await this.pool.query(`DELETE FROM ${this.tableName} WHERE key_hash = $1`, [
- keyHash,
- ]);
+ await this.pool.query(
+ `DELETE FROM ${this.qualifiedTableName} WHERE key_hash = $1`,
+ [keyHash],
+ );
}
/** Clear the persistent storage */
async clear(): Promise {
await this.ensureInitialized();
- await this.pool.query(`TRUNCATE TABLE ${this.tableName}`);
+ await this.pool.query(`TRUNCATE TABLE ${this.qualifiedTableName}`);
}
/**
@@ -163,7 +169,7 @@ export class PersistentStorage implements CacheStorage {
const keyHash = this.hashKey(key);
const result = await this.pool.query<{ exists: boolean }>(
- `SELECT EXISTS(SELECT 1 FROM ${this.tableName} WHERE key_hash = $1) as exists`,
+ `SELECT EXISTS(SELECT 1 FROM ${this.qualifiedTableName} WHERE key_hash = $1) as exists`,
[keyHash],
);
@@ -178,7 +184,7 @@ export class PersistentStorage implements CacheStorage {
await this.ensureInitialized();
const result = await this.pool.query<{ count: string }>(
- `SELECT COUNT(*) as count FROM ${this.tableName}`,
+ `SELECT COUNT(*) as count FROM ${this.qualifiedTableName}`,
);
return parseInt(result.rows[0]?.count ?? "0", 10);
}
@@ -188,7 +194,7 @@ export class PersistentStorage implements CacheStorage {
await this.ensureInitialized();
const result = await this.pool.query<{ total: string }>(
- `SELECT COALESCE(SUM(byte_size), 0) as total FROM ${this.tableName}`,
+ `SELECT COALESCE(SUM(byte_size), 0) as total FROM ${this.qualifiedTableName}`,
);
return parseInt(result.rows[0]?.total ?? "0", 10);
}
@@ -226,7 +232,7 @@ export class PersistentStorage implements CacheStorage {
async cleanupExpired(): Promise {
await this.ensureInitialized();
const result = await this.pool.query<{ count: string }>(
- `WITH deleted as (DELETE FROM ${this.tableName} WHERE expiry < $1 RETURNING *) SELECT COUNT(*) as count FROM deleted`,
+ `WITH deleted as (DELETE FROM ${this.qualifiedTableName} WHERE expiry < $1 RETURNING *) SELECT COUNT(*) as count FROM deleted`,
[Date.now()],
);
return parseInt(result.rows[0]?.count ?? "0", 10);
@@ -243,8 +249,8 @@ export class PersistentStorage implements CacheStorage {
}
await this.pool.query(
- `DELETE FROM ${this.tableName} WHERE key_hash IN
- (SELECT key_hash FROM ${this.tableName} ORDER BY last_accessed ASC LIMIT $1)`,
+ `DELETE FROM ${this.qualifiedTableName} WHERE key_hash IN
+ (SELECT key_hash FROM ${this.qualifiedTableName} ORDER BY last_accessed ASC LIMIT $1)`,
[this.evictionBatchSize],
);
}
@@ -275,9 +281,14 @@ export class PersistentStorage implements CacheStorage {
/** Run migrations for the persistent storage */
private async runMigrations(): Promise {
- try {
- await this.pool.query(`
- CREATE TABLE IF NOT EXISTS ${this.tableName} (
+ const steps = [
+ {
+ name: "create schema",
+ query: `CREATE SCHEMA IF NOT EXISTS ${this.schemaName}`,
+ },
+ {
+ name: "create table",
+ query: `CREATE TABLE IF NOT EXISTS ${this.qualifiedTableName} (
id BIGSERIAL PRIMARY KEY,
key_hash BIGINT NOT NULL,
key BYTEA NOT NULL,
@@ -286,34 +297,33 @@ export class PersistentStorage implements CacheStorage {
expiry BIGINT NOT NULL,
created_at TIMESTAMP NOT NULL DEFAULT NOW(),
last_accessed TIMESTAMP NOT NULL DEFAULT NOW()
- )
- `);
-
- // unique index on key_hash for fast lookups
- await this.pool.query(
- `CREATE UNIQUE INDEX IF NOT EXISTS idx_${this.tableName}_key_hash ON ${this.tableName} (key_hash);`,
- );
-
- // index on expiry for cleanup queries
- await this.pool.query(
- `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_expiry ON ${this.tableName} (expiry); `,
- );
-
- // index on last_accessed for LRU eviction
- await this.pool.query(
- `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_last_accessed ON ${this.tableName} (last_accessed); `,
- );
-
- // index on byte_size for monitoring
- await this.pool.query(
- `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_byte_size ON ${this.tableName} (byte_size); `,
- );
- } catch (error) {
- logger.error(
- "Error in running migrations for persistent storage: %O",
- error,
- );
- throw InitializationError.migrationFailed(error as Error);
+ )`,
+ },
+ {
+ name: "create index (key_hash)",
+ query: `CREATE UNIQUE INDEX IF NOT EXISTS idx_${this.tableName}_key_hash ON ${this.qualifiedTableName} (key_hash)`,
+ },
+ {
+ name: "create index (expiry)",
+ query: `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_expiry ON ${this.qualifiedTableName} (expiry)`,
+ },
+ {
+ name: "create index (last_accessed)",
+ query: `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_last_accessed ON ${this.qualifiedTableName} (last_accessed)`,
+ },
+ {
+ name: "create index (byte_size)",
+ query: `CREATE INDEX IF NOT EXISTS idx_${this.tableName}_byte_size ON ${this.qualifiedTableName} (byte_size)`,
+ },
+ ];
+
+ for (const step of steps) {
+ try {
+ await this.pool.query(step.query);
+ } catch (error) {
+ logger.error("Migration step '%s' failed: %O", step.name, error);
+ throw InitializationError.migrationFailed(error as Error);
+ }
}
}
}
diff --git a/packages/appkit/src/plugins/lakebase/manifest.json b/packages/appkit/src/plugins/lakebase/manifest.json
index e351b90e..2959c092 100644
--- a/packages/appkit/src/plugins/lakebase/manifest.json
+++ b/packages/appkit/src/plugins/lakebase/manifest.json
@@ -3,10 +3,62 @@
"name": "lakebase",
"displayName": "Lakebase",
"description": "SQL query execution against Databricks Lakebase Autoscaling",
- "onSetupMessage": "Configure environment variables before running or deploying the app.\nSee: https://databricks.github.io/appkit/docs/plugins/lakebase",
"hidden": false,
"resources": {
- "required": [],
+ "required": [
+ {
+ "type": "postgres",
+ "alias": "Postgres",
+ "resourceKey": "postgres",
+ "description": "Lakebase Postgres database for persistent storage",
+ "permission": "CAN_CONNECT_AND_CREATE",
+ "fields": {
+ "branch": {
+ "description": "Full Lakebase Postgres branch resource name. Obtain by running `databricks postgres list-branches projects/{project-id}`, select the desired item from the output array and use its .name value.",
+ "examples": ["projects/{project-id}/branches/{branch-id}"]
+ },
+ "database": {
+ "description": "Full Lakebase Postgres database resource name. Obtain by running `databricks postgres list-databases {branch-name}`, select the desired item from the output array and use its .name value. Requires the branch resource name.",
+ "examples": [
+ "projects/{project-id}/branches/{branch-id}/databases/{database-id}"
+ ]
+ },
+ "host": {
+ "env": "PGHOST",
+ "localOnly": true,
+ "resolve": "postgres:host",
+ "description": "Postgres host for local development. Auto-injected by the platform at deploy time."
+ },
+ "databaseName": {
+ "env": "PGDATABASE",
+ "localOnly": true,
+ "resolve": "postgres:databaseName",
+ "description": "Postgres database name for local development. Auto-injected by the platform at deploy time."
+ },
+ "endpointPath": {
+ "env": "LAKEBASE_ENDPOINT",
+ "bundleIgnore": true,
+ "resolve": "postgres:endpointPath",
+ "description": "Lakebase endpoint resource name. Auto-injected at runtime via app.yaml valueFrom: postgres. For local development, obtain by running `databricks postgres list-endpoints {branch-name}`, select the desired item from the output array and use its .name value.",
+ "examples": [
+ "projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}"
+ ]
+ },
+ "port": {
+ "env": "PGPORT",
+ "localOnly": true,
+ "value": "5432",
+ "description": "Postgres port. Auto-injected by the platform at deploy time."
+ },
+ "sslmode": {
+ "env": "PGSSLMODE",
+ "localOnly": true,
+ "value": "require",
+ "description": "Postgres SSL mode. Auto-injected by the platform at deploy time."
+ }
+ }
+ }
+ ],
"optional": []
}
}
diff --git a/packages/appkit/src/registry/resource-registry.ts b/packages/appkit/src/registry/resource-registry.ts
index 2fca1a02..fd8c7dfc 100644
--- a/packages/appkit/src/registry/resource-registry.ts
+++ b/packages/appkit/src/registry/resource-registry.ts
@@ -315,6 +315,7 @@ export class ResourceRegistry {
const values: Record = {};
let allSet = true;
for (const [fieldName, fieldDef] of Object.entries(entry.fields)) {
+ if (!fieldDef.env) continue;
const val = process.env[fieldDef.env];
if (val !== undefined && val !== "") {
values[fieldName] = val;
diff --git a/packages/appkit/src/registry/types.generated.ts b/packages/appkit/src/registry/types.generated.ts
index 7461de4d..7e38af9b 100644
--- a/packages/appkit/src/registry/types.generated.ts
+++ b/packages/appkit/src/registry/types.generated.ts
@@ -12,6 +12,7 @@ export enum ResourceType {
UC_FUNCTION = "uc_function",
UC_CONNECTION = "uc_connection",
DATABASE = "database",
+ POSTGRES = "postgres",
GENIE_SPACE = "genie_space",
EXPERIMENT = "experiment",
APP = "app",
@@ -47,6 +48,9 @@ export type UcConnectionPermission = "USE_CONNECTION";
/** Permissions for DATABASE resources */
export type DatabasePermission = "CAN_CONNECT_AND_CREATE";
+/** Permissions for POSTGRES resources */
+export type PostgresPermission = "CAN_CONNECT_AND_CREATE";
+
/** Permissions for GENIE_SPACE resources */
export type GenieSpacePermission =
| "CAN_VIEW"
@@ -71,6 +75,7 @@ export type ResourcePermission =
| UcFunctionPermission
| UcConnectionPermission
| DatabasePermission
+ | PostgresPermission
| GenieSpacePermission
| ExperimentPermission
| AppPermission;
@@ -89,6 +94,7 @@ export const PERMISSION_HIERARCHY_BY_TYPE: Record<
[ResourceType.UC_FUNCTION]: ["EXECUTE"],
[ResourceType.UC_CONNECTION]: ["USE_CONNECTION"],
[ResourceType.DATABASE]: ["CAN_CONNECT_AND_CREATE"],
+ [ResourceType.POSTGRES]: ["CAN_CONNECT_AND_CREATE"],
[ResourceType.GENIE_SPACE]: ["CAN_VIEW", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"],
[ResourceType.EXPERIMENT]: ["CAN_READ", "CAN_EDIT", "CAN_MANAGE"],
[ResourceType.APP]: ["CAN_USE"],
diff --git a/packages/appkit/src/registry/types.ts b/packages/appkit/src/registry/types.ts
index 272b5021..1a5664cb 100644
--- a/packages/appkit/src/registry/types.ts
+++ b/packages/appkit/src/registry/types.ts
@@ -58,9 +58,19 @@ export {
*/
export interface ResourceFieldEntry {
/** Environment variable name for this field */
- env: string;
+ env?: string;
/** Human-readable description for this field */
description?: string;
+ /** When true, this field is excluded from Databricks bundle configuration (databricks.yml) generation. */
+ bundleIgnore?: boolean;
+ /** Example values showing the expected format for this field */
+ examples?: string[];
+ /** When true, this field is only generated for local .env files. The Databricks Apps platform auto-injects it at deploy time. */
+ localOnly?: boolean;
+ /** Static value for this field. Used when no prompted or resolved value exists. */
+ value?: string;
+ /** Named resolver prefixed by resource type (e.g., 'postgres:host'). The CLI resolves this value during the init prompt flow. */
+ resolve?: string;
}
/**
diff --git a/packages/shared/src/cli/commands/plugin/manifest-types.ts b/packages/shared/src/cli/commands/plugin/manifest-types.ts
index 2f11afca..72516a8f 100644
--- a/packages/shared/src/cli/commands/plugin/manifest-types.ts
+++ b/packages/shared/src/cli/commands/plugin/manifest-types.ts
@@ -5,8 +5,13 @@
*/
export interface ResourceFieldEntry {
- env: string;
+ env?: string;
description?: string;
+ bundleIgnore?: boolean;
+ examples?: string[];
+ localOnly?: boolean;
+ value?: string;
+ resolve?: string;
}
export interface ResourceRequirement {
diff --git a/packages/shared/src/plugin.ts b/packages/shared/src/plugin.ts
index 78837912..94620f7e 100644
--- a/packages/shared/src/plugin.ts
+++ b/packages/shared/src/plugin.ts
@@ -103,9 +103,19 @@ export interface PluginManifest {
*/
export interface ResourceFieldEntry {
/** Environment variable name for this field */
- env: string;
+ env?: string;
/** Human-readable description for this field */
description?: string;
+ /** When true, this field is excluded from Databricks bundle configuration (databricks.yml) generation. */
+ bundleIgnore?: boolean;
+ /** Example values showing the expected format for this field */
+ examples?: string[];
+ /** When true, this field is only generated for local .env files. The Databricks Apps platform auto-injects it at deploy time. */
+ localOnly?: boolean;
+ /** Static value for this field. Used when no prompted or resolved value exists. */
+ value?: string;
+ /** Named resolver prefixed by resource type (e.g., 'postgres:host'). The CLI resolves this value during the init prompt flow. */
+ resolve?: string;
}
/**
diff --git a/packages/shared/src/schemas/plugin-manifest.schema.json b/packages/shared/src/schemas/plugin-manifest.schema.json
index e4c70ed3..cae2e688 100644
--- a/packages/shared/src/schemas/plugin-manifest.schema.json
+++ b/packages/shared/src/schemas/plugin-manifest.schema.json
@@ -111,6 +111,7 @@
"uc_function",
"uc_connection",
"database",
+ "postgres",
"genie_space",
"experiment",
"app"
@@ -162,6 +163,11 @@
"enum": ["CAN_CONNECT_AND_CREATE"],
"description": "Permission for database resources"
},
+ "postgresPermission": {
+ "type": "string",
+ "enum": ["CAN_CONNECT_AND_CREATE"],
+ "description": "Permission for Postgres resources"
+ },
"genieSpacePermission": {
"type": "string",
"enum": ["CAN_VIEW", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"],
@@ -179,7 +185,6 @@
},
"resourceFieldEntry": {
"type": "object",
- "required": ["env"],
"properties": {
"env": {
"type": "string",
@@ -190,20 +195,37 @@
"description": {
"type": "string",
"description": "Human-readable description for this field"
+ },
+ "bundleIgnore": {
+ "type": "boolean",
+ "default": false,
+ "description": "When true, this field is excluded from Databricks bundle configuration (databricks.yml) generation."
+ },
+ "examples": {
+ "type": "array",
+ "items": { "type": "string" },
+ "description": "Example values showing the expected format for this field"
+ },
+ "localOnly": {
+ "type": "boolean",
+ "default": false,
+ "description": "When true, this field is only generated for local .env files. The Databricks Apps platform auto-injects it at deploy time."
+ },
+ "value": {
+ "type": "string",
+ "description": "Static value for this field. Used when no prompted or resolved value exists."
+ },
+ "resolve": {
+ "type": "string",
+ "pattern": "^[a-z_]+:[a-zA-Z]+$",
+ "description": "Named resolver prefixed by resource type (e.g., 'postgres:host'). The CLI resolves this value during the init prompt flow."
}
},
"additionalProperties": false
},
"resourceRequirement": {
"type": "object",
- "required": [
- "type",
- "alias",
- "resourceKey",
- "description",
- "permission",
- "fields"
- ],
+ "required": ["type", "alias", "resourceKey", "description", "permission"],
"properties": {
"type": {
"$ref": "#/$defs/resourceType"
@@ -337,6 +359,17 @@
}
}
},
+ {
+ "if": {
+ "properties": { "type": { "const": "postgres" } },
+ "required": ["type"]
+ },
+ "then": {
+ "properties": {
+ "permission": { "$ref": "#/$defs/postgresPermission" }
+ }
+ }
+ },
{
"if": {
"properties": { "type": { "const": "genie_space" } },
diff --git a/template/.env.example.tmpl b/template/.env.example.tmpl
index 7ab495cb..35ffed20 100644
--- a/template/.env.example.tmpl
+++ b/template/.env.example.tmpl
@@ -2,14 +2,6 @@ DATABRICKS_HOST=https://...
{{- if .dotEnv.example}}
{{.dotEnv.example}}
{{- end}}
-{{- if .plugins.lakebase}}
-PGHOST=your-lakebase-host.databricks.com
-PGDATABASE=databricks_postgres
-# Run: databricks postgres list-endpoints projects/{project-id}/branches/{branch-id}
-LAKEBASE_ENDPOINT=projects//branches//endpoints/
-# PGUSER=your_user # optional, defaults to DATABRICKS_CLIENT_ID
-PGSSLMODE=require
-{{- end}}
DATABRICKS_APP_PORT=8000
DATABRICKS_APP_NAME={{.projectName}}
FLASK_RUN_HOST=0.0.0.0
diff --git a/template/.env.tmpl b/template/.env.tmpl
index 6b40183f..afa7bb15 100644
--- a/template/.env.tmpl
+++ b/template/.env.tmpl
@@ -6,13 +6,6 @@ DATABRICKS_HOST={{.workspaceHost}}
{{- if .dotEnv.content}}
{{.dotEnv.content}}
{{- end}}
-{{- if .plugins.lakebase}}
-PGHOST='' # Copy from the Lakebase Postgres UI
-PGDATABASE='databricks_postgres' # Copy from the Lakebase Postgres UI
-LAKEBASE_ENDPOINT='' # Run: databricks postgres list-endpoints projects/{project-id}/branches/{branch-id}
-# PGUSER='' # optional, defaults to DATABRICKS_CLIENT_ID
-PGSSLMODE=require
-{{- end}}
DATABRICKS_APP_PORT=8000
DATABRICKS_APP_NAME={{.projectName}}
FLASK_RUN_HOST=localhost
diff --git a/template/app.yaml.tmpl b/template/app.yaml.tmpl
index f4ac5ed4..66311d12 100644
--- a/template/app.yaml.tmpl
+++ b/template/app.yaml.tmpl
@@ -1,17 +1,5 @@
command: ['npm', 'run', 'start']
-env:
{{- if .appEnv}}
+env:
{{.appEnv}}
{{- end}}
-{{- if .plugins.lakebase}}
- - name: PGHOST
- value: "" # Copy from the Lakebase Postgres UI
- - name: PGDATABASE
- value: "databricks_postgres" # Copy from the Lakebase Postgres UI
- - name: LAKEBASE_ENDPOINT
- value: "" # Run: databricks postgres list-endpoints projects/{project-id}/branches/{branch-id}
- - name: PGSSLMODE
- value: "require"
-# - name: PGUSER
-# value: "" # optional, defaults to DATABRICKS_CLIENT_ID
-{{- end}}
\ No newline at end of file
diff --git a/template/appkit.plugins.json b/template/appkit.plugins.json
index f5f20df0..cf60a8af 100644
--- a/template/appkit.plugins.json
+++ b/template/appkit.plugins.json
@@ -80,10 +80,64 @@
"description": "SQL query execution against Databricks Lakebase Autoscaling",
"package": "@databricks/appkit",
"resources": {
- "required": [],
+ "required": [
+ {
+ "type": "postgres",
+ "alias": "Postgres",
+ "resourceKey": "postgres",
+ "description": "Lakebase Postgres database for persistent storage",
+ "permission": "CAN_CONNECT_AND_CREATE",
+ "fields": {
+ "branch": {
+ "description": "Full Lakebase Postgres branch resource name. Obtain by running `databricks postgres list-branches projects/{project-id}`, select the desired item from the output array and use its .name value.",
+ "examples": [
+ "projects/{project-id}/branches/{branch-id}"
+ ]
+ },
+ "database": {
+ "description": "Full Lakebase Postgres database resource name. Obtain by running `databricks postgres list-databases {branch-name}`, select the desired item from the output array and use its .name value. Requires the branch resource name.",
+ "examples": [
+ "projects/{project-id}/branches/{branch-id}/databases/{database-id}"
+ ]
+ },
+ "host": {
+ "env": "PGHOST",
+ "localOnly": true,
+ "resolve": "postgres:host",
+ "description": "Postgres host for local development. Auto-injected by the platform at deploy time."
+ },
+ "databaseName": {
+ "env": "PGDATABASE",
+ "localOnly": true,
+ "resolve": "postgres:databaseName",
+ "description": "Postgres database name for local development. Auto-injected by the platform at deploy time."
+ },
+ "endpointPath": {
+ "env": "LAKEBASE_ENDPOINT",
+ "bundleIgnore": true,
+ "resolve": "postgres:endpointPath",
+ "description": "Lakebase endpoint resource name. Auto-injected at runtime via app.yaml valueFrom: postgres. For local development, obtain by running `databricks postgres list-endpoints {branch-name}`, select the desired item from the output array and use its .name value.",
+ "examples": [
+ "projects/{project-id}/branches/{branch-id}/endpoints/{endpoint-id}"
+ ]
+ },
+ "port": {
+ "env": "PGPORT",
+ "localOnly": true,
+ "value": "5432",
+ "description": "Postgres port. Auto-injected by the platform at deploy time."
+ },
+ "sslmode": {
+ "env": "PGSSLMODE",
+ "localOnly": true,
+ "value": "require",
+ "description": "Postgres SSL mode. Auto-injected by the platform at deploy time."
+ }
+ }
+ }
+ ],
"optional": []
- },
- "onSetupMessage": "Configure environment variables before running or deploying the app.\nSee: https://databricks.github.io/appkit/docs/plugins/lakebase"
+ }
},
"server": {
"name": "server",
diff --git a/template/server/routes/lakebase/todo-routes.ts b/template/server/routes/lakebase/todo-routes.ts
index a9a92923..32c47ab8 100644
--- a/template/server/routes/lakebase/todo-routes.ts
+++ b/template/server/routes/lakebase/todo-routes.ts
@@ -11,8 +11,15 @@ interface AppKitWithLakebase {
};
}
+const TABLE_EXISTS_SQL = `
+ SELECT 1 FROM information_schema.tables
+ WHERE table_schema = 'app' AND table_name = 'todos'
+`;
+
+const SETUP_SCHEMA_SQL = `CREATE SCHEMA IF NOT EXISTS app`;
+
const CREATE_TABLE_SQL = `
- CREATE TABLE IF NOT EXISTS todos (
+ CREATE TABLE IF NOT EXISTS app.todos (
id SERIAL PRIMARY KEY,
title TEXT NOT NULL,
completed BOOLEAN NOT NULL DEFAULT false,
@@ -23,13 +30,26 @@ const CREATE_TABLE_SQL = `
const CreateTodoBody = z.object({ title: z.string().min(1) });
export async function setupSampleLakebaseRoutes(appkit: AppKitWithLakebase) {
- await appkit.lakebase.query(CREATE_TABLE_SQL);
+ try {
+ const { rows } = await appkit.lakebase.query(TABLE_EXISTS_SQL);
+ if (rows.length > 0) {
+ console.log('[lakebase] Table app.todos already exists, skipping setup');
+ } else {
+ await appkit.lakebase.query(SETUP_SCHEMA_SQL);
+ await appkit.lakebase.query(CREATE_TABLE_SQL);
+ console.log('[lakebase] Created schema and table app.todos');
+ }
+ } catch (err) {
+ console.warn('[lakebase] Database setup failed:', (err as Error).message);
+ console.warn('[lakebase] Routes will be registered but may return errors');
+ console.warn('[lakebase] See https://databricks.github.io/appkit/docs/plugins/lakebase#database-permissions for troubleshooting');
+ }
appkit.server.extend((app) => {
app.get('/api/lakebase/todos', async (_req, res) => {
try {
const result = await appkit.lakebase.query(
- 'SELECT id, title, completed, created_at FROM todos ORDER BY created_at DESC',
+ 'SELECT id, title, completed, created_at FROM app.todos ORDER BY created_at DESC',
);
res.json(result.rows);
} catch (err) {
@@ -46,7 +66,7 @@ export async function setupSampleLakebaseRoutes(appkit: AppKitWithLakebase) {
return;
}
const result = await appkit.lakebase.query(
- 'INSERT INTO todos (title) VALUES ($1) RETURNING id, title, completed, created_at',
+ 'INSERT INTO app.todos (title) VALUES ($1) RETURNING id, title, completed, created_at',
[parsed.data.title.trim()],
);
res.status(201).json(result.rows[0]);
@@ -64,7 +84,7 @@ export async function setupSampleLakebaseRoutes(appkit: AppKitWithLakebase) {
return;
}
const result = await appkit.lakebase.query(
- 'UPDATE todos SET completed = NOT completed WHERE id = $1 RETURNING id, title, completed, created_at',
+ 'UPDATE app.todos SET completed = NOT completed WHERE id = $1 RETURNING id, title, completed, created_at',
[id],
);
if (result.rows.length === 0) {
@@ -86,7 +106,7 @@ export async function setupSampleLakebaseRoutes(appkit: AppKitWithLakebase) {
return;
}
const result = await appkit.lakebase.query(
- 'DELETE FROM todos WHERE id = $1 RETURNING id',
+ 'DELETE FROM app.todos WHERE id = $1 RETURNING id',
[id],
);
if (result.rows.length === 0) {
diff --git a/tools/generate-app-templates.ts b/tools/generate-app-templates.ts
index 4b2cbf77..009ddb62 100644
--- a/tools/generate-app-templates.ts
+++ b/tools/generate-app-templates.ts
@@ -63,6 +63,8 @@ const APP_TEMPLATES: AppTemplate[] = [
set: {
"analytics.sql-warehouse.id": "placeholder",
"genie.genie-space.id": "placeholder",
+ "lakebase.postgres.branch": "placeholder",
+ "lakebase.postgres.database": "placeholder",
},
description:
"Full-stack Node.js app with SQL analytics dashboards, Genie AI conversations, and Lakebase Autoscaling (Postgres) CRUD",
@@ -87,6 +89,10 @@ const APP_TEMPLATES: AppTemplate[] = [
{
name: "appkit-lakebase",
features: ["lakebase"],
+ set: {
+ "lakebase.postgres.branch": "placeholder",
+ "lakebase.postgres.database": "placeholder",
+ },
description:
"Node.js app with Lakebase Autoscaling (Postgres) CRUD operations",
},