diff --git a/.env.http.example b/.env.http.example
index a7b8cd7f0a..a1e835b6e5 100644
--- a/.env.http.example
+++ b/.env.http.example
@@ -62,6 +62,11 @@ OPERATOR_KEY_MAIN= # Operator private key used to sign transaction
 # ========== TRANSACTION POOL ========
 # PENDING_TRANSACTION_STORAGE_TTL=30 # Time-to-live (TTL) in seconds for transaction payloads stored in Redis
 
+# ========== LOCK SERVICE ===========
+# LOCK_MAX_HOLD_MS=30000 # Maximum time in milliseconds a lock can be held before automatic force-release
+# LOCAL_LOCK_MAX_ENTRIES=1000 # Maximum number of lock entries stored in memory
+# LOCK_QUEUE_POLL_INTERVAL_MS=50 # Interval in milliseconds between queue position checks when waiting for a lock
+
 # ========== HBAR RATE LIMITING ==========
 # HBAR_RATE_LIMIT_TINYBAR=25000000000 # Total HBAR budget (250 HBARs)
 # HBAR_RATE_LIMIT_DURATION=86400000 # HBAR budget limit duration (1 day)
diff --git a/.env.ws.example b/.env.ws.example
index c3359b67af..14f48fe413 100644
--- a/.env.ws.example
+++ b/.env.ws.example
@@ -59,6 +59,11 @@ SUBSCRIPTIONS_ENABLED=true # Must be true for the WebSocket server to func
 # ========== TRANSACTION POOL ========
 # PENDING_TRANSACTION_STORAGE_TTL=30 # Time-to-live (TTL) in seconds for transaction payloads stored in Redis
 
+# ========== LOCK SERVICE ===========
+# LOCK_MAX_HOLD_MS=30000 # Maximum time in milliseconds a lock can be held before automatic force-release
+# LOCAL_LOCK_MAX_ENTRIES=1000 # Maximum number of lock entries stored in memory
+# LOCK_QUEUE_POLL_INTERVAL_MS=50 # Interval in milliseconds between queue position checks when waiting for a lock
+
 # ========== OTHER SETTINGS ==========
 # CLIENT_TRANSPORT_SECURITY=false # Enable or disable TLS for both networks
 # USE_ASYNC_TX_PROCESSING=true # If true, returns tx hash immediately after prechecks
diff --git a/charts/hedera-json-rpc-relay/values.yaml b/charts/hedera-json-rpc-relay/values.yaml
index e0bc043ae3..63c68539b0 100644
--- a/charts/hedera-json-rpc-relay/values.yaml
+++ b/charts/hedera-json-rpc-relay/values.yaml
@@ -123,6 +123,11 @@ config:
   # REDIS_RECONNECT_DELAY_MS:
   # MULTI_SET:
 
+  # ========== LOCK SERVICE CONFIGURATION ==========
+  # LOCK_MAX_HOLD_MS:
+  # LOCAL_LOCK_MAX_ENTRIES:
+  # LOCK_QUEUE_POLL_INTERVAL_MS:
+
   # ========== DEVELOPMENT & TESTING ==========
 
   # LOG_LEVEL: 'trace'
diff --git a/docs/configuration.md b/docs/configuration.md
index d0239e4477..6d701eea4a 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -69,7 +69,6 @@ Unless you need to set a non-default value, it is recommended to only populate o
 | `JUMBO_TX_ENABLED` | "true" | Controls how large transactions are handled during `eth_sendRawTransaction`. When set to `true`, transactions up to 128KB can be sent directly to consensus nodes without using Hedera File Service (HFS), as long as contract bytecode doesn't exceed 24KB. When set to `false`, all transactions containing contract deployments use the traditional HFS approach. This feature leverages the increased transaction size limit to simplify processing of standard Ethereum transactions. |
 | `LIMIT_DURATION` | "60000" | The maximum duration in ms applied to IP-method based rate limits. |
 | `LOCAL_LOCK_MAX_ENTRIES` | "1000" | Maximum number of lock entries stored in memory. Prevents unbounded memory growth. |
-| `LOCAL_LOCK_MAX_LOCK_TIME` | "30000" | Timer to auto-release if lock not manually released (in ms). |
 | `MAX_GAS_ALLOWANCE_HBAR` | "0" | The maximum amount, in hbars, that the JSON-RPC Relay is willing to pay to complete the transaction in case the senders don't provide enough funds. Please note, in case of fully subsidized transactions, the sender must set the gas price to `0` and the JSON-RPC Relay must configure the `MAX_GAS_ALLOWANCE_HBAR` parameter high enough to cover the entire transaction cost. |
 | `MAX_TRANSACTION_FEE_THRESHOLD` | "15000000" | Used to set the max transaction fee. This is the HAPI fee which is paid by the relay operator account. |
 | `MIRROR_NODE_AGENT_CACHEABLE_DNS` | "true" | Flag to set if the mirror node agent should cacheable DNS lookups, using better-lookup library. |
@@ -107,6 +106,8 @@ Unless you need to set a non-default value, it is recommended to only populate o
 | `TX_DEFAULT_GAS` | "400000" | Default gas for transactions that do not specify gas. |
 | `TXPOOL_API_ENABLED` | "false" | Enables all txpool related methods. |
 | `USE_ASYNC_TX_PROCESSING` | "true" | Set to `true` to enable `eth_sendRawTransaction` to return the transaction hash immediately after passing all prechecks, while processing the transaction asynchronously in the background. |
+| `LOCK_MAX_HOLD_MS` | "30000" | Maximum time in milliseconds a lock can be held before automatic force-release. This TTL prevents deadlocks when transaction processing hangs or crashes. Default is 30 seconds. |
+| `LOCK_QUEUE_POLL_INTERVAL_MS` | "50" | Interval in milliseconds between queue position checks when waiting for a lock. Lower values provide faster lock acquisition but increase Redis load. Default is 50ms. |
 | `USE_MIRROR_NODE_MODULARIZED_SERVICES` | null | Controls routing of Mirror Node traffic through modularized services. When set to `true`, enables routing a percentage of traffic to modularized services. When set to `false`, ensures traffic follows the traditional non-modularized flow. When not set (i.e. `null` by default), no specific routing preference is applied. As Mirror Node gradually transitions to a fully modularized architecture across all networks, this setting will eventually default to `true`. |
 
 ## Server
diff --git a/packages/config-service/src/services/globalConfig.ts b/packages/config-service/src/services/globalConfig.ts
index dad1b6fbca..1cc0ff8ef0 100644
--- a/packages/config-service/src/services/globalConfig.ts
+++ b/packages/config-service/src/services/globalConfig.ts
@@ -368,11 +368,6 @@ const _CONFIG = {
     required: false,
     defaultValue: 1000,
   },
-  LOCAL_LOCK_MAX_LOCK_TIME: {
-    type: 'number',
-    required: false,
-    defaultValue: 30000,
-  },
   LOG_LEVEL: {
     type: 'string',
     required: false,
@@ -659,6 +654,16 @@ const _CONFIG = {
     required: false,
     defaultValue: true,
   },
+  LOCK_MAX_HOLD_MS: {
+    type: 'number',
+    required: false,
+    defaultValue: 30000,
+  },
+  LOCK_QUEUE_POLL_INTERVAL_MS: {
+    type: 'number',
+    required: false,
+    defaultValue: 50,
+  },
   USE_MIRROR_NODE_MODULARIZED_SERVICES: {
     type: 'boolean',
     required: false,
diff --git a/packages/relay/src/lib/services/lockService/LocalLockStrategy.ts b/packages/relay/src/lib/services/lockService/LocalLockStrategy.ts
index 5b7c681e99..d19ef219c0 100644
--- a/packages/relay/src/lib/services/lockService/LocalLockStrategy.ts
+++ b/packages/relay/src/lib/services/lockService/LocalLockStrategy.ts
@@ -6,6 +6,8 @@ import { randomUUID } from 'crypto';
 import { LRUCache } from 'lru-cache';
 import { Logger } from 'pino';
 
+import { LockService } from './LockService';
+
 /**
  * Represents the internal state for a lock associated with a given address.
  */
@@ -71,7 +73,7 @@ export class LocalLockStrategy {
     // Start a 30-second timer to auto-release if lock not manually released
     state.lockTimeoutId = setTimeout(() => {
       this.forceReleaseExpiredLock(address, sessionKey);
-    }, ConfigService.get('LOCAL_LOCK_MAX_LOCK_TIME'));
+    }, ConfigService.get('LOCK_MAX_HOLD_MS'));
 
     return sessionKey;
   }
@@ -83,13 +85,13 @@ export class LocalLockStrategy {
    * @param sessionKey - The session key of the lock holder
    */
   async releaseLock(address: string, sessionKey: string): Promise<void> {
-    if (this.logger.isLevelEnabled('debug')) {
-      const holdTime = Date.now() - state.acquiredAt!;
+    const state = this.localLockStates.get(address);
+
+    if (this.logger.isLevelEnabled('debug') && state?.acquiredAt) {
+      const holdTime = Date.now() - state.acquiredAt;
       this.logger.debug(`Releasing lock for address ${address} and session key ${sessionKey} held for ${holdTime}ms.`);
     }
-    const state = this.localLockStates.get(address);
-
     // Ensure only the lock owner can release
     if (state?.sessionKey === sessionKey) {
       await this.doRelease(state);
     }
@@ -103,9 +105,9 @@ export class LocalLockStrategy {
    * @returns The LockState object associated with the address
    */
   private getOrCreateState(address: string): LockState {
-    address = address.toLowerCase();
-    if (!this.localLockStates.has(address)) {
-      this.localLockStates.set(address, {
+    const normalizedAddress = LockService.normalizeAddress(address);
+    if (!this.localLockStates.has(normalizedAddress)) {
+      this.localLockStates.set(normalizedAddress, {
         mutex: new Mutex(),
         sessionKey: null,
         acquiredAt: null,
@@ -113,7 +115,7 @@ export class LocalLockStrategy {
       });
     }
 
-    return this.localLockStates.get(address)!;
+    return this.localLockStates.get(normalizedAddress)!;
   }
 
   /**
diff --git a/packages/relay/src/lib/services/lockService/LockService.ts b/packages/relay/src/lib/services/lockService/LockService.ts
index ccc34c9850..b250184b7f 100644
--- a/packages/relay/src/lib/services/lockService/LockService.ts
+++ b/packages/relay/src/lib/services/lockService/LockService.ts
@@ -26,9 +26,9 @@ export class LockService {
    * Blocks until the lock is available (no timeout on waiting).
    *
    * @param address - The sender address to acquire the lock for.
-   * @returns A promise that resolves to a unique session key.
+   * @returns A promise that resolves to a unique session key, or null if acquisition fails (fail open).
    */
-  async acquireLock(address: string): Promise<string> {
+  async acquireLock(address: string): Promise<string | null> {
     return await this.strategy.acquireLock(address);
   }
 
@@ -42,4 +42,14 @@ export class LockService {
   async releaseLock(address: string, sessionKey: string): Promise<void> {
     await this.strategy.releaseLock(address, sessionKey);
   }
+
+  /**
+   * Normalizes an address to lowercase for consistent key generation across lock strategies.
+   *
+   * @param address - The address to normalize.
+   * @returns The normalized address.
+   */
+  static normalizeAddress(address: string): string {
+    return address.toLowerCase();
+  }
 }
diff --git a/packages/relay/src/lib/services/lockService/LockStrategyFactory.ts b/packages/relay/src/lib/services/lockService/LockStrategyFactory.ts
index 138e0178c1..2a0082f9bb 100644
--- a/packages/relay/src/lib/services/lockService/LockStrategyFactory.ts
+++ b/packages/relay/src/lib/services/lockService/LockStrategyFactory.ts
@@ -5,6 +5,7 @@ import { RedisClientType } from 'redis';
 
 import { LockStrategy } from '../../types';
 import { LocalLockStrategy } from './LocalLockStrategy';
+import { RedisLockStrategy } from './RedisLockStrategy';
 
 /**
  * Factory for creating LockStrategy instances.
@@ -23,11 +24,10 @@ export class LockStrategyFactory {
    */
   static create(redisClient: RedisClientType | undefined, logger: Logger): LockStrategy {
-    // TODO: Remove placeholder errors once strategies are implemented
     if (redisClient) {
-      // throw new Error('Redis lock strategy not yet implemented');
+      return new RedisLockStrategy(redisClient, logger.child({ name: 'redis-lock-strategy' }));
     }
 
-    return new LocalLockStrategy(logger);
+    return new LocalLockStrategy(logger.child({ name: 'local-lock-strategy' }));
   }
 }
diff --git a/packages/relay/src/lib/services/lockService/RedisLockStrategy.ts b/packages/relay/src/lib/services/lockService/RedisLockStrategy.ts
new file mode 100644
index 0000000000..70ba1eb302
--- /dev/null
+++ b/packages/relay/src/lib/services/lockService/RedisLockStrategy.ts
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: Apache-2.0
+
+import { ConfigService } from '@hashgraph/json-rpc-config-service/dist/services';
+import { randomUUID } from 'crypto';
+import { Logger } from 'pino';
+import { RedisClientType } from 'redis';
+
+import { LockStrategy } from '../../types/lock';
+import { LockService } from './LockService';
+
+/**
+ * Redis-based distributed lock strategy implementing FIFO queue semantics.
+ *
+ * Uses Redis SET NX + LIST for distributed locking across multiple relay instances.
+ * Provides automatic TTL-based expiration and polling-based lock acquisition.
+ *
+ * @remarks
+ * - Lock keys: `{prefix}:{address}` stores current holder's session key
+ * - Queue keys: `{prefix}:queue:{address}` stores FIFO queue of waiters
+ * - TTL on lock keys provides automatic cleanup on crashes/hangs
+ */
+export class RedisLockStrategy implements LockStrategy {
+  private readonly redisClient: RedisClientType;
+  private readonly logger: Logger;
+  private readonly maxLockHoldMs: number;
+  private readonly pollIntervalMs: number;
+  private readonly keyPrefix = 'lock';
+
+  constructor(redisClient: RedisClientType, logger: Logger) {
+    this.redisClient = redisClient;
+    this.logger = logger;
+    this.maxLockHoldMs = ConfigService.get('LOCK_MAX_HOLD_MS');
+    this.pollIntervalMs = ConfigService.get('LOCK_QUEUE_POLL_INTERVAL_MS');
+  }
+
+  /**
+   * Acquires a lock for the specified address using FIFO queue semantics.
+   *
+   * @param address - The sender address to acquire the lock for (will be normalized).
+   * @returns A promise that resolves to a unique session key upon successful acquisition, or null if acquisition fails (fail open).
+   */
+  async acquireLock(address: string): Promise<string | null> {
+    const sessionKey = this.generateSessionKey();
+    const lockKey = this.getLockKey(address);
+    const queueKey = this.getQueueKey(address);
+    const startTime = Date.now();
+    let joinedQueue = false;
+
+    try {
+      // Join FIFO queue
+      await this.redisClient.lPush(queueKey, sessionKey);
+      joinedQueue = true;
+
+      if (this.logger.isLevelEnabled('trace')) {
+        this.logger.trace(`Lock acquisition started: address=${address}, sessionKey=${sessionKey}`);
+      }
+
+      // Poll until first in queue and can acquire lock
+      while (true) {
+        // Check if first in line
+        const firstInQueue = await this.redisClient.lIndex(queueKey, -1);
+
+        if (firstInQueue === sessionKey) {
+          // Try to acquire lock with TTL
+          const acquired = await this.redisClient.set(lockKey, sessionKey, {
+            NX: true, // Only set if not exists
+            PX: this.maxLockHoldMs, // TTL in milliseconds
+          });
+
+          if (acquired) {
+            const acquisitionDuration = Date.now() - startTime;
+            const queueLength = await this.redisClient.lLen(queueKey);
+
+            if (this.logger.isLevelEnabled('debug')) {
+              this.logger.debug(
+                `Lock acquired: address=${address}, sessionKey=${sessionKey}, duration=${acquisitionDuration}ms, queueLength=${queueLength}`,
+              );
+            }
+
+            return sessionKey;
+          }
+        }
+
+        // Wait before checking again
+        await this.sleep(this.pollIntervalMs);
+      }
+    } catch (error) {
+      this.logger.error(error, `Failed to acquire lock: address=${address}, sessionKey=${sessionKey}. Failing open.`);
+      return null;
+    } finally {
+      // Always remove from queue if we joined it (whether success or failure)
+      if (joinedQueue) {
+        await this.removeFromQueue(queueKey, sessionKey, address);
+      }
+    }
+  }
+
+  /**
+   * Releases a lock for the specified address.
+   * Only succeeds if the session key matches the current lock holder.
+   *
+   * @param address - The sender address to release the lock for (will be normalized).
+   * @param sessionKey - The session key proving ownership of the lock.
+   */
+  async releaseLock(address: string, sessionKey: string): Promise<void> {
+    const lockKey = this.getLockKey(address);
+
+    try {
+      // Atomic check-and-delete using Lua script
+      // Only deletes the lock if the session key matches (ownership check)
+      const result = await this.redisClient.eval(
+        `
+        if redis.call("get", KEYS[1]) == ARGV[1] then
+          return redis.call("del", KEYS[1])
+        else
+          return 0
+        end
+        `,
+        {
+          keys: [lockKey],
+          arguments: [sessionKey],
+        },
+      );
+
+      if (result === 1) {
+        if (this.logger.isLevelEnabled('debug')) {
+          this.logger.debug(`Lock released: address=${address}, sessionKey=${sessionKey}`);
+        }
+      } else {
+        // Lock was already released or owned by someone else - ignore
+        if (this.logger.isLevelEnabled('trace')) {
+          this.logger.trace(
+            `Lock release ignored (not owner or already released): address=${address}, sessionKey=${sessionKey}`,
+          );
+        }
+      }
+    } catch (error) {
+      this.logger.error(error, `Failed to release lock: address=${address}, sessionKey=${sessionKey}`);
+      // Don't throw - release failures should not block the caller
+    }
+  }
+
+  /**
+   * Generates the Redis key for a lock.
+   * Automatically normalizes the address to ensure consistency.
+   *
+   * @param address - The sender address (will be normalized to lowercase).
+   * @returns The Redis lock key.
+   */
+  private getLockKey(address: string): string {
+    const normalizedAddress = LockService.normalizeAddress(address);
+    return `${this.keyPrefix}:${normalizedAddress}`;
+  }
+
+  /**
+   * Generates the Redis key for a lock queue.
+   * Automatically normalizes the address to ensure consistency.
+   *
+   * @param address - The sender address (will be normalized to lowercase).
+   * @returns The Redis queue key.
+   */
+  private getQueueKey(address: string): string {
+    const normalizedAddress = LockService.normalizeAddress(address);
+    return `${this.keyPrefix}:queue:${normalizedAddress}`;
+  }
+
+  /**
+   * Removes a session key from the queue.
+   * Used for cleanup after successful acquisition or on error.
+   *
+   * @param queueKey - The queue key.
+   * @param sessionKey - The session key to remove.
+   * @param address - The address (for logging).
+   */
+  private async removeFromQueue(queueKey: string, sessionKey: string, address: string): Promise<void> {
+    try {
+      await this.redisClient.lRem(queueKey, 1, sessionKey);
+      if (this.logger.isLevelEnabled('trace')) {
+        this.logger.trace(`Removed from queue: address=${address}, sessionKey=${sessionKey}`);
+      }
+    } catch (error) {
+      this.logger.warn(error, `Failed to remove from queue: address=${address}, sessionKey=${sessionKey}`);
+    }
+  }
+
+  /**
+   * Generates a unique session key for lock acquisition.
+   * Protected to allow test mocking.
+   *
+   * @returns A unique session key.
+   */
+  protected generateSessionKey(): string {
+    return randomUUID();
+  }
+
+  /**
+   * Sleeps for the specified duration.
+   *
+   * @param ms - Duration in milliseconds.
+   */
+  private sleep(ms: number): Promise<void> {
+    return new Promise((resolve) => setTimeout(resolve, ms));
+  }
+}
diff --git a/packages/relay/src/lib/types/lock.ts b/packages/relay/src/lib/types/lock.ts
index f97690e534..51f023e03a 100644
--- a/packages/relay/src/lib/types/lock.ts
+++ b/packages/relay/src/lib/types/lock.ts
@@ -13,9 +13,9 @@ export interface LockStrategy {
    * Blocks until the lock is available or timeout is reached.
    *
    * @param address - The address to acquire the lock for (will be normalized by implementation).
-   * @returns A promise that resolves to a unique session key upon successful acquisition.
+   * @returns A promise that resolves to a unique session key upon successful acquisition, or null if acquisition fails (fail open).
    */
-  acquireLock(address: string): Promise<string>;
+  acquireLock(address: string): Promise<string | null>;
 
   /**
    * Releases a lock for the specified address.
diff --git a/packages/relay/tests/lib/services/lockService/LocalLockStrategy.spec.ts b/packages/relay/tests/lib/services/lockService/LocalLockStrategy.spec.ts
index cdd83d741d..9583a49502 100644
--- a/packages/relay/tests/lib/services/lockService/LocalLockStrategy.spec.ts
+++ b/packages/relay/tests/lib/services/lockService/LocalLockStrategy.spec.ts
@@ -83,7 +83,7 @@ describe('LocalLockStrategy', function () {
     expect(secondAcquired).to.be.true;
   });
 
-  withOverriddenEnvsInMochaTest({ LOCAL_LOCK_MAX_LOCK_TIME: 200 }, () => {
+  withOverriddenEnvsInMochaTest({ LOCK_MAX_HOLD_MS: 200 }, () => {
     it('should auto-release after max lock time', async () => {
       const address = 'test-auto-release';
 
diff --git a/packages/relay/tests/lib/services/lockService/RedisLockStrategy.spec.ts b/packages/relay/tests/lib/services/lockService/RedisLockStrategy.spec.ts
new file mode 100644
index 0000000000..d5c433cd4b
--- /dev/null
+++ b/packages/relay/tests/lib/services/lockService/RedisLockStrategy.spec.ts
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: Apache-2.0
+
+import { expect, use } from 'chai';
+import chaiAsPromised from 'chai-as-promised';
+import { Logger, pino } from 'pino';
+import { RedisClientType } from 'redis';
+import * as sinon from 'sinon';
+
+import { RedisLockStrategy } from '../../../../src/lib/services/lockService/RedisLockStrategy';
+
+use(chaiAsPromised);
+
+describe('RedisLockStrategy Test Suite', function () {
+  this.timeout(10000);
+
+  let logger: Logger;
+  let mockRedisClient: sinon.SinonStubbedInstance<RedisClientType>;
+  let redisLockStrategy: RedisLockStrategy;
+
+  const testAddress = '0x1234567890abcdef1234567890abcdef12345678';
+  const normalizedAddress = testAddress.toLowerCase();
+
+  beforeEach(() => {
+    logger = pino({ level: 'silent' });
+
+    // Create a mock Redis client
+    mockRedisClient = {
+      lPush: sinon.stub(),
+      lIndex: sinon.stub(),
+      set: sinon.stub(),
+      lRem: sinon.stub(),
+      lLen: sinon.stub(),
+      eval: sinon.stub(),
+    } as any;
+
+    redisLockStrategy = new RedisLockStrategy(mockRedisClient as any, logger);
+  });
+
+  afterEach(() => {
+    sinon.restore();
+  });
+
+  describe('acquireLock', () => {
+    it('should successfully acquire lock when first in queue', async () => {
+      const sessionKey = 'test-session-key';
+
+      // Mock queue operations
+      mockRedisClient.lPush.resolves(1);
+      mockRedisClient.lIndex.resolves(sessionKey);
+      mockRedisClient.set.resolves('OK');
+      mockRedisClient.lRem.resolves(1);
+      mockRedisClient.lLen.resolves(0);
+
+      // Stub generateSessionKey to return predictable value
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      const result = await redisLockStrategy.acquireLock(testAddress);
+
+      expect(result).to.equal(sessionKey);
+      expect(mockRedisClient.lPush.calledOnce).to.be.true;
+      expect(mockRedisClient.lPush.calledWith(`lock:queue:${normalizedAddress}`, sessionKey)).to.be.true;
+      expect(mockRedisClient.set.calledOnce).to.be.true;
+      expect(mockRedisClient.lRem.calledOnce).to.be.true;
+      expect(mockRedisClient.lRem.calledWith(`lock:queue:${normalizedAddress}`, 1, sessionKey)).to.be.true;
+    });
+
+    it('should wait in queue until first position', async () => {
+      const sessionKey = 'test-session-key';
+      const otherSessionKey = 'other-session-key';
+
+      // Mock queue operations - first call returns other session, second returns our session
+      mockRedisClient.lPush.resolves(2);
+      mockRedisClient.lIndex.onFirstCall().resolves(otherSessionKey).onSecondCall().resolves(sessionKey);
+      mockRedisClient.set.resolves('OK');
+      mockRedisClient.lRem.resolves(1);
+      mockRedisClient.lLen.resolves(1);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      const result = await redisLockStrategy.acquireLock(testAddress);
+
+      expect(result).to.equal(sessionKey);
+      expect(mockRedisClient.lIndex.callCount).to.equal(2);
+      expect(mockRedisClient.lRem.calledOnce).to.be.true;
+    });
+
+    it('should normalize address to lowercase', async () => {
+      const sessionKey = 'test-session-key';
+      const upperCaseAddress = '0xABCDEF1234567890ABCDEF1234567890ABCDEF12';
+
+      mockRedisClient.lPush.resolves(1);
+      mockRedisClient.lIndex.resolves(sessionKey);
+      mockRedisClient.set.resolves('OK');
+      mockRedisClient.lRem.resolves(1);
+      mockRedisClient.lLen.resolves(0);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      await redisLockStrategy.acquireLock(upperCaseAddress);
+
+      expect(mockRedisClient.lPush.calledWith(`lock:queue:${upperCaseAddress.toLowerCase()}`, sessionKey)).to.be.true;
+      expect(mockRedisClient.lRem.calledWith(`lock:queue:${upperCaseAddress.toLowerCase()}`, 1, sessionKey)).to.be.true;
+    });
+
+    it('should handle Redis errors during acquisition and cleanup queue (fail open)', async () => {
+      const sessionKey = 'test-session-key';
+      const redisError = new Error('Redis connection failed');
+
+      // lPush succeeds, but lIndex fails
+      mockRedisClient.lPush.resolves(1);
+      mockRedisClient.lIndex.rejects(redisError);
+      mockRedisClient.lRem.resolves(1);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      const result = await redisLockStrategy.acquireLock(testAddress);
+
+      // Should return null (fail open) instead of throwing
+      expect(result).to.be.null;
+
+      // Should have attempted cleanup
+      expect(mockRedisClient.lRem.calledOnce).to.be.true;
+      expect(mockRedisClient.lRem.calledWith(`lock:queue:${normalizedAddress}`, 1, sessionKey)).to.be.true;
+    });
+
+    it('should handle Redis errors before joining queue without cleanup (fail open)', async () => {
+      const sessionKey = 'test-session-key';
+      const redisError = new Error('Redis connection failed');
+
+      // lPush fails immediately
+      mockRedisClient.lPush.rejects(redisError);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      const result = await redisLockStrategy.acquireLock(testAddress);
+
+      // Should return null (fail open) instead of throwing
+      expect(result).to.be.null;
+
+      // Should NOT have attempted cleanup (never joined queue)
+      expect(mockRedisClient.lRem.called).to.be.false;
+    });
+  });
+
+  describe('releaseLock', () => {
+    it('should successfully release lock with valid session key', async () => {
+      const sessionKey = 'test-session-key';
+
+      // Mock Lua script execution - return 1 for successful deletion
+      mockRedisClient.eval.resolves(1);
+
+      await redisLockStrategy.releaseLock(testAddress, sessionKey);
+
+      expect(mockRedisClient.eval.calledOnce).to.be.true;
+      const evalCall = mockRedisClient.eval.getCall(0);
+      expect(evalCall.args[0]).to.be.a('string'); // Lua script
+      expect(evalCall.args[1]).to.deep.equal({
+        keys: [`lock:${normalizedAddress}`],
+        arguments: [sessionKey],
+      });
+    });
+
+    it('should ignore release with invalid session key', async () => {
+      const sessionKey = 'test-session-key';
+
+      // Mock Lua script execution - return 0 for no deletion (not owner)
+      mockRedisClient.eval.resolves(0);
+
+      // Should not throw
+      await redisLockStrategy.releaseLock(testAddress, sessionKey);
+
+      expect(mockRedisClient.eval.calledOnce).to.be.true;
+    });
+
+    it('should handle Redis errors during release gracefully', async () => {
+      const sessionKey = 'test-session-key';
+      const redisError = new Error('Redis connection failed');
+
+      mockRedisClient.eval.rejects(redisError);
+
+      // Should not throw - release failures should not block caller
+      await redisLockStrategy.releaseLock(testAddress, sessionKey);
+
+      expect(mockRedisClient.eval.calledOnce).to.be.true;
+    });
+
+    it('should normalize address during release', async () => {
+      const sessionKey = 'test-session-key';
+      const upperCaseAddress = '0xABCDEF1234567890ABCDEF1234567890ABCDEF12';
+
+      mockRedisClient.eval.resolves(1);
+
+      await redisLockStrategy.releaseLock(upperCaseAddress, sessionKey);
+
+      const evalCall = mockRedisClient.eval.getCall(0);
+      expect(evalCall).to.not.be.null;
+      expect((evalCall as any).args[1].keys[0]).to.equal(`lock:${upperCaseAddress.toLowerCase()}`);
+    });
+  });
+
+  describe('FIFO ordering', () => {
+    it('should maintain FIFO order for multiple waiters', async () => {
+      const session1 = 'session-1';
+
+      // Simulate session joining queue
+      mockRedisClient.lPush.resolves(1);
+
+      // First session acquires immediately
+      mockRedisClient.lIndex.onCall(0).resolves(session1);
+      mockRedisClient.set.onCall(0).resolves('OK');
+      mockRedisClient.lRem.onCall(0).resolves(1);
+      mockRedisClient.lLen.resolves(2);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(session1);
+
+      const result1 = await redisLockStrategy.acquireLock(testAddress);
+      expect(result1).to.equal(session1);
+
+      // Verify LPUSH was called (adding to queue)
+      expect(mockRedisClient.lPush.calledWith(`lock:queue:${normalizedAddress}`, session1)).to.be.true;
+      // Verify LREM was called (removing from queue)
+      expect(mockRedisClient.lRem.calledWith(`lock:queue:${normalizedAddress}`, 1, session1)).to.be.true;
+    });
+  });
+
+  describe('TTL-based expiration', () => {
+    it('should set TTL when acquiring lock', async () => {
+      const sessionKey = 'test-session-key';
+
+      mockRedisClient.lPush.resolves(1);
+      mockRedisClient.lIndex.resolves(sessionKey);
+      mockRedisClient.set.resolves('OK');
+      mockRedisClient.lRem.resolves(1);
+      mockRedisClient.lLen.resolves(0);
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      await redisLockStrategy.acquireLock(testAddress);
+
+      // Verify SET was called with NX and PX options
+      const setCall = mockRedisClient.set.getCall(0);
+      expect(setCall.args[0]).to.equal(`lock:${normalizedAddress}`);
+      expect(setCall.args[1]).to.equal(sessionKey);
+      expect(setCall.args[2]).to.deep.include({ NX: true });
+      expect(setCall.args[2]).to.have.property('PX');
+    });
+  });
+
+  describe('Error handling and resilience', () => {
+    it('should handle cleanup failures gracefully', async () => {
+      const sessionKey = 'test-session-key';
+      const redisError = new Error('Redis connection failed');
+
+      // lPush succeeds, lIndex fails, lRem also fails
+      mockRedisClient.lPush.resolves(1);
+      mockRedisClient.lIndex.rejects(redisError);
+      mockRedisClient.lRem.rejects(new Error('Cleanup failed'));
+
+      sinon.stub(redisLockStrategy as any, 'generateSessionKey').returns(sessionKey);
+
+      const result = await redisLockStrategy.acquireLock(testAddress);
+
+      // Should return null (fail open) instead of throwing
+      expect(result).to.be.null;
+
+      // Cleanup was attempted
+      expect(mockRedisClient.lRem.calledOnce).to.be.true;
+    });
+  });
+});
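
A minimal usage sketch of the fail-open lock contract introduced above, assuming a constructed LockService instance is available on the sender path; the wrapper function name, the processTransaction callback, and the import path are illustrative assumptions, not code from this change:

// Illustrative only: names and import path are assumptions, not part of this diff.
import { LockService } from './LockService';

async function sendWithSenderLock(
  lockService: LockService,
  senderAddress: string,
  processTransaction: () => Promise<string>,
): Promise<string> {
  // Waits in the FIFO queue until the lock is granted, or resolves to null
  // if the lock backend fails, so transaction traffic is never blocked.
  const sessionKey = await lockService.acquireLock(senderAddress);
  try {
    return await processTransaction();
  } finally {
    // Release only when a session key was actually issued; otherwise rely on
    // the LOCK_MAX_HOLD_MS TTL to force-release a stuck or crashed holder.
    if (sessionKey !== null) {
      await lockService.releaseLock(senderAddress, sessionKey);
    }
  }
}

Because acquireLock resolves to null rather than throwing when Redis is unavailable, callers written this way degrade to unserialized processing instead of failing the request, which matches the fail-open behavior exercised in the RedisLockStrategy tests.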