33 changes: 31 additions & 2 deletions poetry.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -36,7 +36,8 @@ Repository = 'https://github.com/elevenlabs/elevenlabs-python'

 [tool.poetry.dependencies]
 python = "^3.8"
-httpx = ">=0.21.2"
+httpx = {version = "^0.28.1", extras = ["socks"]}
+python-socks = ">=2.7.1"
 pyaudio = { version = ">=0.2.14", optional = true}
 pydantic = ">= 1.9.2"
 pydantic-core = "^2.18.2"
3 changes: 2 additions & 1 deletion requirements.txt
@@ -1,4 +1,5 @@
-httpx>=0.21.2
+httpx[socks]>=0.28.1
+python-socks>=2.7.1
 pyaudio>=0.2.14
 pydantic>= 1.9.2
 pydantic-core==2.18.2
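
For context (not part of the diff): the socks extra installs the socksio package, which lets httpx tunnel HTTP traffic through a SOCKS proxy, while python-socks provides the SOCKS support used by the websockets library for the conversation WebSocket. A minimal sketch of the httpx side, assuming a placeholder SOCKS5 proxy at localhost:1080:

import httpx

# Route HTTP requests through a SOCKS5 proxy.
# Requires the socksio package (pulled in by httpx[socks]);
# "socks5://localhost:1080" is a placeholder address.
with httpx.Client(proxy="socks5://localhost:1080") as client:
    response = client.get("https://api.elevenlabs.io/v1/user")
    print(response.status_code)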
6 changes: 4 additions & 2 deletions src/elevenlabs/conversational_ai/conversation.py
@@ -241,7 +241,7 @@ class Conversation:
     callback_user_transcript: Optional[Callable[[str], None]]
     callback_latency_measurement: Optional[Callable[[int], None]]
     callback_end_session: Optional[Callable]
-
+    proxy_url: Optional[str]
     _thread: Optional[threading.Thread]
     _should_stop: threading.Event
     _conversation_id: Optional[str]
@@ -263,6 +263,7 @@ def __init__(
         callback_user_transcript: Optional[Callable[[str], None]] = None,
         callback_latency_measurement: Optional[Callable[[int], None]] = None,
         callback_end_session: Optional[Callable] = None,
+        proxy_url: Optional[str] = None,
     ):
         """Conversational AI session.

@@ -295,6 +296,7 @@ def __init__(
         self.callback_user_transcript = callback_user_transcript
         self.callback_latency_measurement = callback_latency_measurement
         self.callback_end_session = callback_end_session
+        self.proxy_url = proxy_url

         self.client_tools.start()

@@ -395,7 +397,7 @@ def send_contextual_update(self, text: str):
             raise

     def _run(self, ws_url: str):
-        with connect(ws_url, max_size=16 * 1024 * 1024) as ws:
+        with connect(ws_url, max_size=16 * 1024 * 1024, proxy=self.proxy_url) as ws:
             self._ws = ws
             ws.send(
                 json.dumps(
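
Illustrative usage (not part of the diff): with this change, callers can route the conversation's WebSocket connection through a proxy by passing proxy_url, which _run forwards to websockets' connect(). A minimal sketch, assuming the library's usual Conversation(client, agent_id, requires_auth=..., audio_interface=...) signature and placeholder credentials and proxy address:

from elevenlabs.client import ElevenLabs
from elevenlabs.conversational_ai.conversation import Conversation
from elevenlabs.conversational_ai.default_audio_interface import DefaultAudioInterface

client = ElevenLabs(api_key="...")  # placeholder API key

conversation = Conversation(
    client,
    agent_id="...",  # placeholder agent ID
    requires_auth=True,
    audio_interface=DefaultAudioInterface(),
    # New parameter from this PR; forwarded to connect(..., proxy=self.proxy_url).
    proxy_url="socks5://localhost:1080",  # placeholder proxy address
)
conversation.start_session()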