@@ -14,12 +14,18 @@ import * as pgwire from '@powersync/service-jpgwire';
 import { SqliteRow } from '@powersync/service-sync-rules';
 
 import { PgManager } from '@module/replication/PgManager.js';
-import { createCoreReplicationMetrics, initializeCoreReplicationMetrics, storage } from '@powersync/service-core';
+import { ReplicationAbortedError } from '@powersync/lib-services-framework';
+import {
+  createCoreReplicationMetrics,
+  initializeCoreReplicationMetrics,
+  reduceBucket,
+  storage
+} from '@powersync/service-core';
 import { METRICS_HELPER, test_utils } from '@powersync/service-core-tests';
 import * as mongo_storage from '@powersync/service-module-mongodb-storage';
 import * as postgres_storage from '@powersync/service-module-postgres-storage';
 import * as timers from 'node:timers/promises';
-import { ReplicationAbortedError } from '@powersync/lib-services-framework';
+import { WalStreamTestContext } from './wal_stream_utils.js';
 
 describe.skipIf(!(env.CI || env.SLOW_TESTS))('slow tests', function () {
   describeWithStorage({ timeout: 120_000 }, function (factory) {
@@ -300,7 +306,7 @@ bucket_definitions:
   //
   // If the first LSN does not correctly match with the first replication transaction,
   // we may miss some updates.
-  test('repeated initial replication', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+  test('repeated initial replication (1)', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
     const pool = await connectPgPool();
     await clearTestDb(pool);
     await using f = await factory();
@@ -348,7 +354,7 @@ bucket_definitions:
 
     await storage.clear();
 
-    // 3. Start initial replication, then streaming, but don't wait for any of this
+    // 3. Start replication, but don't wait for it
     let initialReplicationDone = false;
     streamPromise = walStream.replicate();
     walStream
@@ -408,4 +414,97 @@ bucket_definitions:
       await connections.end();
     }
   });
+
+  // Test repeatedly performing initial replication while deleting data.
+  //
+  // This specifically checks for data in the initial snapshot being deleted while snapshotting.
+  test('repeated initial replication with deletes', { timeout: TEST_DURATION_MS + TIMEOUT_MARGIN_MS }, async () => {
+    const syncRuleContent = `
+bucket_definitions:
+  global:
+    data:
+      - SELECT id, description FROM "test_data"
+`;
+
+    const start = Date.now();
+    let i = 0;
+
+    while (Date.now() - start < TEST_DURATION_MS) {
+      i += 1;
+
+      // 1. Each iteration starts with a clean slate
+      await using context = await WalStreamTestContext.open(factory, {
+        walStreamOptions: { snapshotChunkLength: 100 }
+      });
+      const pool = context.pool;
+
+      // Introduce an artificial delay in snapshot queries, to make it more likely to
+      // reproduce an issue.
+      const originalSnapshotConnectionFn = context.connectionManager.snapshotConnection;
+      context.connectionManager.snapshotConnection = async () => {
+        const conn = await originalSnapshotConnectionFn.call(context.connectionManager);
+        // Wrap streaming query to add delays to snapshots
+        const originalStream = conn.stream;
+        conn.stream = async function* (...args: any[]) {
+          const delay = Math.random() * 20;
+          yield* originalStream.call(this, ...args);
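+          // Pause after the rows are streamed, widening the window for concurrent deletes.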
+          await new Promise((resolve) => setTimeout(resolve, delay));
+        };
+        return conn;
+      };
+
+      await pool.query(`CREATE TABLE test_data(id uuid primary key default uuid_generate_v4(), description text)`);
+      await context.updateSyncRules(syncRuleContent);
+
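+      // 2. Insert a random batch of up to 200 rows, collecting the generated ids.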
+      let statements: pgwire.Statement[] = [];
+
+      const n = Math.floor(Math.random() * 200);
+      for (let i = 0; i < n; i++) {
+        statements.push({
+          statement: `INSERT INTO test_data(description) VALUES('test_init') RETURNING id`
+        });
+      }
+      const results = await pool.query(...statements);
+      const ids = new Set(
+        results.results.map((sub) => {
+          return sub.rows[0][0] as string;
+        })
+      );
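+      // ids now tracks the rows expected to survive; step 4 removes entries as it deletes them.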
+
+      // 3. Start replication, but don't wait for it
+      let initialReplicationDone = false;
+
+      streamPromise = context.replicateSnapshot().finally(() => {
+        initialReplicationDone = true;
+      });
+
+      // 4. While initial replication is still running, delete random rows
+      while (!initialReplicationDone && ids.size > 0) {
+        let statements: pgwire.Statement[] = [];
+
+        const m = Math.floor(Math.random() * 10) + 1;
+        const idArray = Array.from(ids);
+        for (let i = 0; i < m; i++) {
+          const id = idArray[Math.floor(Math.random() * idArray.length)];
+          statements.push({
+            statement: `DELETE FROM test_data WHERE id = $1`,
+            params: [{ type: 'uuid', value: id }]
+          });
+          ids.delete(id);
+        }
+        await pool.query(...statements);
+        await new Promise((resolve) => setTimeout(resolve, Math.random() * 10));
+      }
+
+      await streamPromise;
+
+      // 5. Once initial replication is done, check that the bucket contains exactly the rows that were not deleted.
+      const data = await context.getBucketData('global[]', 0n);
+      const normalized = reduceBucket(data).filter((op) => op.op !== 'CLEAR');
+      expect(normalized.length).toEqual(ids.size);
+    }
+  });
 }