Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .env.http.example
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,11 @@ OPERATOR_KEY_MAIN= # Operator private key used to sign transaction
# ========== TRANSACTION POOL ==========
# PENDING_TRANSACTION_STORAGE_TTL=30 # Time-to-live (TTL) in seconds for transaction payloads stored in Redis

# ========== LOCK SERVICE ==========
# LOCK_MAX_HOLD_MS=30000 # Maximum time in milliseconds a lock can be held before automatic force-release
# LOCAL_LOCK_MAX_ENTRIES=1000 # Maximum number of lock entries stored in memory
# LOCK_QUEUE_POLL_INTERVAL_MS=50 # Interval in milliseconds between queue position checks when waiting for a lock

# ========== HBAR RATE LIMITING ==========
# HBAR_RATE_LIMIT_TINYBAR=25000000000 # Total HBAR budget (250 HBARs)
# HBAR_RATE_LIMIT_DURATION=86400000 # HBAR budget limit duration (1 day)
Expand Down
5 changes: 5 additions & 0 deletions .env.ws.example
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,11 @@ SUBSCRIPTIONS_ENABLED=true # Must be true for the WebSocket server to func
# ========== TRANSACTION POOL ==========
# PENDING_TRANSACTION_STORAGE_TTL=30 # Time-to-live (TTL) in seconds for transaction payloads stored in Redis

# ========== LOCK SERVICE ==========
# LOCK_MAX_HOLD_MS=30000 # Maximum time in milliseconds a lock can be held before automatic force-release
# LOCAL_LOCK_MAX_ENTRIES=1000 # Maximum number of lock entries stored in memory
# LOCK_QUEUE_POLL_INTERVAL_MS=50 # Interval in milliseconds between queue position checks when waiting for a lock

# ========== OTHER SETTINGS ==========
# CLIENT_TRANSPORT_SECURITY=false # Enable or disable TLS for both networks
# USE_ASYNC_TX_PROCESSING=true # If true, returns tx hash immediately after prechecks
Expand Down
5 changes: 5 additions & 0 deletions charts/hedera-json-rpc-relay/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,11 @@ config:
# REDIS_RECONNECT_DELAY_MS:
# MULTI_SET:

# ========== LOCK SERVICE CONFIGURATION ==========
# LOCK_MAX_HOLD_MS:
# LOCAL_LOCK_MAX_ENTRIES:
# LOCK_QUEUE_POLL_INTERVAL_MS:

# ========== DEVELOPMENT & TESTING ==========
# LOG_LEVEL: 'trace'

Expand Down
3 changes: 2 additions & 1 deletion docs/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,6 @@ Unless you need to set a non-default value, it is recommended to only populate o
| `JUMBO_TX_ENABLED` | "true" | Controls how large transactions are handled during `eth_sendRawTransaction`. When set to `true`, transactions up to 128KB can be sent directly to consensus nodes without using Hedera File Service (HFS), as long as contract bytecode doesn't exceed 24KB. When set to `false`, all transactions containing contract deployments use the traditional HFS approach. This feature leverages the increased transaction size limit to simplify processing of standard Ethereum transactions. |
| `LIMIT_DURATION` | "60000" | The maximum duration in ms applied to IP-method based rate limits. |
| `LOCAL_LOCK_MAX_ENTRIES` | "1000" | Maximum number of lock entries stored in memory. Prevents unbounded memory growth. |
| `LOCAL_LOCK_MAX_LOCK_TIME` | "30000" | Timer to auto-release if lock not manually released (in ms). |
| `MAX_GAS_ALLOWANCE_HBAR` | "0" | The maximum amount, in hbars, that the JSON-RPC Relay is willing to pay to complete the transaction in case the senders don't provide enough funds. Please note, in case of fully subsidized transactions, the sender must set the gas price to `0` and the JSON-RPC Relay must configure the `MAX_GAS_ALLOWANCE_HBAR` parameter high enough to cover the entire transaction cost. |
| `MAX_TRANSACTION_FEE_THRESHOLD` | "15000000" | Used to set the max transaction fee. This is the HAPI fee which is paid by the relay operator account. |
| `MIRROR_NODE_AGENT_CACHEABLE_DNS` | "true" | Flag to set whether the mirror node agent should use cacheable DNS lookups, via the better-lookup library. |
Expand Down Expand Up @@ -107,6 +106,8 @@ Unless you need to set a non-default value, it is recommended to only populate o
| `TX_DEFAULT_GAS` | "400000" | Default gas for transactions that do not specify gas. |
| `TXPOOL_API_ENABLED` | "false" | Enables all txpool related methods. |
| `USE_ASYNC_TX_PROCESSING` | "true" | Set to `true` to enable `eth_sendRawTransaction` to return the transaction hash immediately after passing all prechecks, while processing the transaction asynchronously in the background. |
| `LOCK_MAX_HOLD_MS` | "30000" | Maximum time in milliseconds a lock can be held before automatic force-release. This TTL prevents deadlocks when transaction processing hangs or crashes. Default is 30 seconds. |
| `LOCK_QUEUE_POLL_INTERVAL_MS` | "50" | Interval in milliseconds between queue position checks when waiting for a lock. Lower values provide faster lock acquisition but increase Redis load. Default is 50ms. |
| `USE_MIRROR_NODE_MODULARIZED_SERVICES` | null | Controls routing of Mirror Node traffic through modularized services. When set to `true`, enables routing a percentage of traffic to modularized services. When set to `false`, ensures traffic follows the traditional non-modularized flow. When not set (i.e. `null` by default), no specific routing preference is applied. As Mirror Node gradually transitions to a fully modularized architecture across all networks, this setting will eventually default to `true`. |

## Server
Expand Down
15 changes: 10 additions & 5 deletions packages/config-service/src/services/globalConfig.ts
Original file line number Diff line number Diff line change
Expand Up @@ -368,11 +368,6 @@ const _CONFIG = {
required: false,
defaultValue: 1000,
},
LOCAL_LOCK_MAX_LOCK_TIME: {
type: 'number',
required: false,
defaultValue: 30000,
},
LOG_LEVEL: {
type: 'string',
required: false,
Expand Down Expand Up @@ -659,6 +654,16 @@ const _CONFIG = {
required: false,
defaultValue: true,
},
LOCK_MAX_HOLD_MS: {
type: 'number',
required: false,
defaultValue: 30000,
},
LOCK_QUEUE_POLL_INTERVAL_MS: {
type: 'number',
required: false,
defaultValue: 50,
},
USE_MIRROR_NODE_MODULARIZED_SERVICES: {
type: 'boolean',
required: false,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ import { randomUUID } from 'crypto';
import { LRUCache } from 'lru-cache';
import { Logger } from 'pino';

import { LockService } from './LockService';

/**
* Represents the internal state for a lock associated with a given address.
*/
Expand Down Expand Up @@ -71,7 +73,7 @@ export class LocalLockStrategy {
// Start an auto-release timer (LOCK_MAX_HOLD_MS, default 30s) in case the lock is not manually released
state.lockTimeoutId = setTimeout(() => {
this.forceReleaseExpiredLock(address, sessionKey);
}, ConfigService.get('LOCAL_LOCK_MAX_LOCK_TIME'));
}, ConfigService.get('LOCK_MAX_HOLD_MS'));

return sessionKey;
}
Expand All @@ -83,13 +85,13 @@ export class LocalLockStrategy {
* @param sessionKey - The session key of the lock holder
*/
async releaseLock(address: string, sessionKey: string): Promise<void> {
if (this.logger.isLevelEnabled('debug')) {
const holdTime = Date.now() - state.acquiredAt!;
const state = this.localLockStates.get(address);

if (this.logger.isLevelEnabled('debug') && state?.acquiredAt) {
const holdTime = Date.now() - state.acquiredAt;
this.logger.debug(`Releasing lock for address ${address} and session key ${sessionKey} held for ${holdTime}ms.`);
}

const state = this.localLockStates.get(address);

// Ensure only the lock owner can release
if (state?.sessionKey === sessionKey) {
await this.doRelease(state);
Expand All @@ -103,17 +105,17 @@ export class LocalLockStrategy {
* @returns The LockState object associated with the address
*/
private getOrCreateState(address: string): LockState {
address = address.toLowerCase();
if (!this.localLockStates.has(address)) {
this.localLockStates.set(address, {
const normalizedAddress = LockService.normalizeAddress(address);
if (!this.localLockStates.has(normalizedAddress)) {
this.localLockStates.set(normalizedAddress, {
mutex: new Mutex(),
sessionKey: null,
acquiredAt: null,
lockTimeoutId: null,
});
}

return this.localLockStates.get(address)!;
return this.localLockStates.get(normalizedAddress)!;
}

/**
Expand Down
14 changes: 12 additions & 2 deletions packages/relay/src/lib/services/lockService/LockService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,9 @@ export class LockService {
* Blocks until the lock is available (no timeout on waiting).
*
* @param address - The sender address to acquire the lock for.
* @returns A promise that resolves to a unique session key.
* @returns A promise that resolves to a unique session key, or null if acquisition fails (fail open).
*/
async acquireLock(address: string): Promise<string> {
async acquireLock(address: string): Promise<string | null> {
return await this.strategy.acquireLock(address);
}

Expand All @@ -42,4 +42,14 @@ export class LockService {
async releaseLock(address: string, sessionKey: string): Promise<void> {
await this.strategy.releaseLock(address, sessionKey);
}

/**
 * Produces the canonical (lowercased) form of an address so every lock
 * strategy derives identical keys for the same sender.
 *
 * NOTE: this is plain lowercasing only — no EIP-55 checksum encoding is
 * applied, so checksummed and lowercase forms map to the same lock key.
 *
 * @param address - The address to canonicalize.
 * @returns The lowercased address.
 */
static normalizeAddress(address: string): string {
  const canonical = address.toLowerCase();
  return canonical;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import { RedisClientType } from 'redis';

import { LockStrategy } from '../../types';
import { LocalLockStrategy } from './LocalLockStrategy';
import { RedisLockStrategy } from './RedisLockStrategy';

/**
* Factory for creating LockStrategy instances.
Expand All @@ -23,11 +24,10 @@ export class LockStrategyFactory {
*/

static create(redisClient: RedisClientType | undefined, logger: Logger): LockStrategy {
// TODO: Remove placeholder errors once strategies are implemented
if (redisClient) {
// throw new Error('Redis lock strategy not yet implemented');
return new RedisLockStrategy(redisClient, logger.child({ name: 'redis-lock-strategy' }));
}

return new LocalLockStrategy(logger);
return new LocalLockStrategy(logger.child({ name: 'local-lock-strategy' }));
}
}
Loading