From 9e3195479c81eab31a2ce74b6efe27579f1427e8 Mon Sep 17 00:00:00 2001 From: Lagrang3 Date: Tue, 7 Apr 2026 14:51:34 +0100 Subject: [PATCH 1/3] sql: make LIMIT_PER_LIST into a dev option ... for regression tests. Changelog-None Signed-off-by: Lagrang3 --- plugins/sql.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/plugins/sql.c b/plugins/sql.c index cd0a52200d9f..a6b9b5472973 100644 --- a/plugins/sql.c +++ b/plugins/sql.c @@ -162,6 +162,7 @@ struct sql { int gosstore_fd ; size_t gosstore_nodes_off, gosstore_channels_off; u64 next_rowid; + u32 limit_per_list; /* This is an aux_command for all our watches */ struct command *waitcmd; @@ -1715,14 +1716,13 @@ static const char *db_table_name(const tal_t *ctx, const char *cmdname) return ret; } -#define LIMIT_PER_LIST 10000 - static struct command_result *limited_list_done(struct command *cmd, const char *method, const char *buf, const jsmntok_t *result, struct db_query *dbq) { + struct sql *sql = sql_of(dbq->cmd->plugin); struct table_desc *td = dbq->tables[0]; struct command_result *ret; size_t num_entries; @@ -1735,7 +1735,7 @@ static struct command_result *limited_list_done(struct command *cmd, return ret; /* If we got the number we asked for, we need to ask again. 
*/ - return one_refresh_done(cmd, dbq, num_entries == LIMIT_PER_LIST); + return one_refresh_done(cmd, dbq, num_entries == sql->limit_per_list); } /* The simplest case: append-only lists */ @@ -1743,6 +1743,7 @@ static struct command_result *refresh_by_created_index(struct command *cmd, struct table_desc *td, struct db_query *dbq) { + struct sql *sql = sql_of(dbq->cmd->plugin); struct out_req *req; /* Since we're relying on watches, mark refreshing unnecessary to start */ @@ -1754,7 +1755,7 @@ static struct command_result *refresh_by_created_index(struct command *cmd, dbq); json_add_string(req->js, "index", "created"); json_add_u64(req->js, "start", td->last_created_index + 1); - json_add_u64(req->js, "limit", LIMIT_PER_LIST); + json_add_u64(req->js, "limit", sql->limit_per_list); return send_outreq(req); } @@ -2216,11 +2217,16 @@ int main(int argc, char *argv[]) sql->gosstore_fd = -1; sql->gosstore_nodes_off = sql->gosstore_channels_off = 0; sql->next_rowid = 1; + sql->limit_per_list = 10000; plugin_main(argv, init, take(sql), PLUGIN_RESTARTABLE, true, NULL, commands, ARRAY_SIZE(commands), NULL, 0, NULL, 0, NULL, 0, plugin_option_dev("dev-sqlfilename", "string", "Use on-disk sqlite3 file instead of in memory (e.g. debugging)", charp_option, NULL, &sql->dbfilename), + plugin_option_dev("dev-sqllistlimit", + "int", + "A variable setting for how many chainmoves/channelmoves we fetch at once", + u32_option, NULL, &sql->limit_per_list), NULL); } From e8b34e3219caf9ae094b1efc2e58ba43cbb2344f Mon Sep 17 00:00:00 2001 From: Lagrang3 Date: Tue, 7 Apr 2026 14:52:40 +0100 Subject: [PATCH 2/3] sql: add test that triggers bug on channelmoves Updating the channelmoves table makes the sql plugin crash if there are more elements in listchannelmoves than limit_per_list. 
Changelog-None Signed-off-by: Lagrang3 --- tests/test_plugin.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/test_plugin.py b/tests/test_plugin.py index df6193db96ab..a2f197bc05e4 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -4410,6 +4410,17 @@ def test_sql_deprecated(node_factory, bitcoind): assert ret == {'rows': [[1]]} +@pytest.mark.xfail(strict=True) +def test_sql_limit_per_list(node_factory): + l1, l2, l3 = node_factory.line_graph( + 3, wait_for_announce=True, opts=[{}, {"dev-sqllistlimit": 10}, {}] + ) + for i in range(20): + inv = l3.rpc.invoice(1000, f"inv-{i}", f"inv-{i}")["bolt11"] + l1.rpc.xpay(inv) + l2.rpc.sql("SELECT created_index, payment_hash FROM channelmoves") + + def test_plugin_persist_option(node_factory): """test that options from config file get remembered across plugin stop/start""" plugin_path = os.path.join(os.getcwd(), 'contrib/plugins/helloworld.py') From 711dfe16d14094ff3dc0b01bf5a5d83cdb975ef1 Mon Sep 17 00:00:00 2001 From: Lagrang3 Date: Fri, 3 Apr 2026 13:23:57 +0100 Subject: [PATCH 3/3] sql: fix BROKEN for large channelmoves tables. sql: plugins/sql.c:1749: refresh_by_created_index: Assertion `td->refresh_needs != REFRESH_UNNECESSARY' failed. sql: FATAL SIGNAL 6 (version v26.04rc2) 0x5618e147892e send_backtrace common/daemon.c:38 0x5618e14789bb crashdump common/daemon.c:83 0x7f54d10ea04f ??? 
./signal/../sysdeps/unix/sysv/linux/x86_64/libc_sigaction.c:0 0x7f54d1138eec __pthread_kill_implementation ./nptl/pthread_kill.c:44 0x7f54d10e9fb1 __GI_raise ../sysdeps/posix/raise.c:26 0x7f54d10d4471 __GI_abort ./stdlib/abort.c:79 0x7f54d10d4394 __assert_fail_base ./assert/assert.c:94 0x7f54d10e2ec1 __GI___assert_fail ./assert/assert.c:103 0x5618e1472725 refresh_by_created_index plugins/sql.c:1749 0x5618e14736af one_refresh_done plugins/sql.c:579 0x5618e1473932 limited_list_done plugins/sql.c:1738 0x5618e1477418 handle_rpc_reply plugins/libplugin.c:1093 0x5618e1477548 rpc_conn_read_response plugins/libplugin.c:1398 0x5618e147ec71 next_plan ccan/ccan/io/io.c:60 0x5618e147ef90 do_plan ccan/ccan/io/io.c:422 0x5618e147f049 io_ready ccan/ccan/io/io.c:439 0x5618e147ffae io_loop ccan/ccan/io/poll.c:470 0x5618e14786af plugin_main plugins/libplugin.c:2461 0x5618e1474b12 main plugins/sql.c:2219 0x7f54d10d5249 __libc_start_call_main ../sysdeps/nptl/libc_start_call_main.h:58 0x7f54d10d5304 __libc_start_main_impl ../csu/libc-start.c:360 0x5618e1470710 ??? _start+0x20:0 0xffffffffffffffff ??? ???:0 Changelog-Fixed: sql: fix BROKEN for large channelmoves tables. Signed-off-by: Lagrang3 --- plugins/sql.c | 7 +++++-- tests/test_plugin.py | 1 - 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/plugins/sql.c b/plugins/sql.c index a6b9b5472973..d3e810d14dc7 100644 --- a/plugins/sql.c +++ b/plugins/sql.c @@ -1726,6 +1726,7 @@ static struct command_result *limited_list_done(struct command *cmd, struct table_desc *td = dbq->tables[0]; struct command_result *ret; size_t num_entries; + bool was_limited; ret = process_json_result(cmd, buf, result, td, false, &td->last_created_index, @@ -1734,8 +1735,11 @@ static struct command_result *limited_list_done(struct command *cmd, if (ret) return ret; + was_limited = num_entries == sql->limit_per_list; + if (!was_limited) + td->refresh_needs = REFRESH_UNNECESSARY; /* If we got the number we asked for, we need to ask again. 
*/ - return one_refresh_done(cmd, dbq, num_entries == sql->limit_per_list); + return one_refresh_done(cmd, dbq, was_limited); } /* The simplest case: append-only lists */ @@ -1748,7 +1752,6 @@ static struct command_result *refresh_by_created_index(struct command *cmd, /* Since we're relying on watches, mark refreshing unnecessary to start */ assert(td->refresh_needs != REFRESH_UNNECESSARY); - td->refresh_needs = REFRESH_UNNECESSARY; req = jsonrpc_request_start(cmd, td->cmdname, limited_list_done, forward_error, diff --git a/tests/test_plugin.py b/tests/test_plugin.py index a2f197bc05e4..3f4f11cbe90f 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -4410,7 +4410,6 @@ def test_sql_deprecated(node_factory, bitcoind): assert ret == {'rows': [[1]]} -@pytest.mark.xfail(strict=True) def test_sql_limit_per_list(node_factory): l1, l2, l3 = node_factory.line_graph( 3, wait_for_announce=True, opts=[{}, {"dev-sqllistlimit": 10}, {}]