litellm.InternalServerError with Qwen3 #2113

Open

Labels: bug (Something isn't working)

Description

Hi,

I ran into trouble after deploying with the Qwen3 LLM and couldn't tell exactly what was happening. Initially I thought it was a network issue preventing a connection to the LLM server, but I was able to curl it successfully from within the ai-service container. I suspect it may be a configuration issue.
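For reference, the check from inside the ai-service container looked roughly like the following (a sketch: the container name is taken from the log section below, and the POST payload/model name are illustrative; note that a plain GET only proves the host is reachable, while the call path that fails in the logs is a chat-completion POST):

# open a shell inside the ai-service container
docker exec -it wrenai-wren-ai-service-1 sh

# basic reachability check against the LLM endpoint
curl -v http://qwen3-235b.hnzhrd.gov.cn/v1/models

# closer reproduction of the failing call path: a chat-completion POST
curl -v http://qwen3-235b.hnzhrd.gov.cn/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -d '{"model": "Qwen3-235b", "messages": [{"role": "user", "content": "hi"}], "max_tokens": 8}'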

config.yaml

type: llm
provider: litellm_llm
timeout: 120
models:
  - alias: default
    model: openai/Qwen3-235b
    api_base: http://qwen3-235b.hnzhrd.gov.cn/v1
    api_key_name: OPENAI_API_KEY
    context_window_size: 32768
    timeout: 120
    kwargs:
      max_tokens: 8192
      n: 1
      seed: 0
      temperature: 0

  - model: openai/Qwen2.5-32B-Instruct
    api_base: http://qwen25.hnzhrd.gov.cn/v1
    api_key_name: OPENAI_API_KEY
    context_window_size: 32768
    timeout: 120
    kwargs:
      max_tokens: 8192
      n: 1
      seed: 0
      temperature: 0

---
type: embedder
provider: litellm_embedder
models:
  - alias: default
    model: openai/Qwen3-Embedding-8B
    api_base: http://qwen30.hnzhrd.gov.cn/v1
    api_key_name: OPENAI_API_KEY
    timeout: 120

---
type: engine
provider: wren_ui
endpoint: http://wren-ui:3000

---
type: engine
provider: wren_ibis
endpoint: http://ibis-server:8000

---
type: document_store
provider: qdrant
location: http://qdrant:6333
embedding_model_dim: 4096
timeout: 120
recreate_index: true

---
type: pipeline
pipes:
  - name: db_schema_indexing
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: historical_question_indexing
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: table_description_indexing
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: db_schema_retrieval
    llm: litellm_llm.default
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: historical_question_retrieval
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: sql_generation
    llm: litellm_llm.default
    engine: wren_ui
    document_store: qdrant
  - name: sql_correction
    llm: litellm_llm.default
    engine: wren_ui
    document_store: qdrant
  - name: followup_sql_generation
    llm: litellm_llm.default
    engine: wren_ui
    document_store: qdrant
  - name: sql_answer
    llm: litellm_llm.default
  - name: semantics_description
    llm: litellm_llm.default
  - name: relationship_recommendation
    llm: litellm_llm.default
  - name: question_recommendation
    llm: litellm_llm.default
  - name: question_recommendation_sql_generation
    llm: litellm_llm.default
    engine: wren_ui
    document_store: qdrant
  - name: intent_classification
    llm: litellm_llm.default
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: misleading_assistance
    llm: litellm_llm.default
  - name: data_assistance
    llm: litellm_llm.default
  - name: sql_pairs_indexing
    document_store: qdrant
    embedder: litellm_embedder.default
  - name: sql_pairs_retrieval
    document_store: qdrant
    embedder: litellm_embedder.default
    llm: litellm_llm.default
  - name: preprocess_sql_data
    llm: litellm_llm.default
  - name: sql_executor
    engine: wren_ui
  - name: chart_generation
    llm: litellm_llm.default
  - name: chart_adjustment
    llm: litellm_llm.default
  - name: user_guide_assistance
    llm: litellm_llm.default
  - name: sql_question_generation
    llm: litellm_llm.default
  - name: sql_generation_reasoning
    llm: litellm_llm.default
  - name: followup_sql_generation_reasoning
    llm: litellm_llm.default
  - name: sql_regeneration
    llm: litellm_llm.default
    engine: wren_ui
  - name: instructions_indexing
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: instructions_retrieval
    embedder: litellm_embedder.default
    document_store: qdrant
  - name: sql_functions_retrieval
    engine: wren_ibis
    document_store: qdrant
  - name: project_meta_indexing
    document_store: qdrant
  - name: sql_tables_extraction
    llm: litellm_llm.default
  - name: sql_diagnosis
    llm: litellm_llm.default
  - name: sql_knowledge_retrieval
    engine: wren_ibis
    document_store: qdrant

---
settings:
  doc_endpoint: https://docs.getwren.ai
  is_oss: true
  engine_timeout: 30
  column_indexing_batch_size: 50
  table_retrieval_size: 10
  table_column_retrieval_size: 100
  allow_intent_classification: true
  allow_sql_generation_reasoning: true
  allow_sql_functions_retrieval: true
  enable_column_pruning: false
  max_sql_correction_retries: 3
  query_cache_maxsize: 1000
  query_cache_ttl: 3600
  langfuse_host: https://cloud.langfuse.com
  langfuse_enable: true
  logging_level: DEBUG
  development: false
  historical_question_retrieval_similarity_threshold: 0.9
  sql_pairs_similarity_threshold: 0.7
  sql_pairs_retrieval_max_size: 10
  instructions_similarity_threshold: 0.7
  instructions_top_k: 10

docker-compose.yaml

version: "3"

volumes:
  data:

networks:
  wren:
    driver: bridge

services:
  bootstrap:
    image: ghcr.io/canner/wren-bootstrap:${WREN_BOOTSTRAP_VERSION}
    restart: on-failure
    platform: ${PLATFORM}
    environment:
      DATA_PATH: /app/data
    volumes:
      - data:/app/data
    command: /bin/sh /app/init.sh

  wren-engine:
    image: ghcr.io/canner/wren-engine:${WREN_ENGINE_VERSION}
    restart: on-failure
    platform: ${PLATFORM}
    expose:
      - ${WREN_ENGINE_PORT}
      - ${WREN_ENGINE_SQL_PORT}
    volumes:
      - data:/usr/src/app/etc
      - ${PROJECT_DIR}/data:/usr/src/app/data
    networks:
      - wren
    depends_on:
      - bootstrap

  ibis-server:
    image: ghcr.io/canner/wren-engine-ibis:${IBIS_SERVER_VERSION}
    restart: on-failure
    platform: ${PLATFORM}
    expose:
      - ${IBIS_SERVER_PORT}
    environment:
      WREN_ENGINE_ENDPOINT: http://wren-engine:${WREN_ENGINE_PORT}
    volumes:
      - ${LOCAL_STORAGE:-.}:/usr/src/app/data
    networks:
      - wren

  wren-ai-service:
    image: ghcr.io/canner/wren-ai-service:${WREN_AI_SERVICE_VERSION}
    restart: on-failure
    platform: ${PLATFORM}
    expose:
      - ${WREN_AI_SERVICE_PORT}
    ports:
      - ${AI_SERVICE_FORWARD_PORT}:${WREN_AI_SERVICE_PORT}
    environment:
      # sometimes the console won't show print messages;
      # setting PYTHONUNBUFFERED: 1 fixes this
      PYTHONUNBUFFERED: 1
      CONFIG_PATH: /app/config.yaml
      NO_PROXY: "localhost,127.0.0.1,qwen3-235b.hnzhrd.gov.cn,qwen25.hnzhrd.gov.cn,qwen30.hnzhrd.gov.cn"
    env_file:
      - ${PROJECT_DIR}/.env
    volumes:
      - ${PROJECT_DIR}/config.yaml:/app/config.yaml:ro
      - ${PROJECT_DIR}/data:/app/data:ro
    networks:
      - wren
    depends_on:
      - qdrant
    extra_hosts:
      - "qwen3-235b.hnzhrd.gov.cn:59.232.2.15"
      - "qwen25.hnzhrd.gov.cn:59.232.2.15"
      - "qwen30.hnzhrd.gov.cn:59.232.2.15"

  qdrant:
    image: qdrant/qdrant:v1.11.0
    restart: on-failure
    expose:
      - 6333
      - 6334
    volumes:
      - data:/qdrant/storage
    networks:
      - wren

  wren-ui:
    image: ghcr.io/canner/wren-ui:${WREN_UI_VERSION}
    restart: on-failure
    platform: ${PLATFORM}
    environment:
      DB_TYPE: sqlite
      # /app is the working directory in the container
      SQLITE_FILE: /app/data/db.sqlite3
      WREN_ENGINE_ENDPOINT: http://wren-engine:${WREN_ENGINE_PORT}
      WREN_AI_ENDPOINT: http://wren-ai-service:${WREN_AI_SERVICE_PORT}
      IBIS_SERVER_ENDPOINT: http://ibis-server:${IBIS_SERVER_PORT}
      # this is for telemetry to know the model; the ai-service might be
      # able to provide an endpoint to expose this information
      GENERATION_MODEL: ${GENERATION_MODEL}
      # telemetry
      WREN_ENGINE_PORT: ${WREN_ENGINE_PORT}
      WREN_AI_SERVICE_VERSION: ${WREN_AI_SERVICE_VERSION}
      WREN_UI_VERSION: ${WREN_UI_VERSION}
      WREN_ENGINE_VERSION: ${WREN_ENGINE_VERSION}
      USER_UUID: ${USER_UUID}
      POSTHOG_API_KEY: ${POSTHOG_API_KEY}
      POSTHOG_HOST: ${POSTHOG_HOST}
      TELEMETRY_ENABLED: ${TELEMETRY_ENABLED}
      # client side
      NEXT_PUBLIC_USER_UUID: ${USER_UUID}
      NEXT_PUBLIC_POSTHOG_API_KEY: ${POSTHOG_API_KEY}
      NEXT_PUBLIC_POSTHOG_HOST: ${POSTHOG_HOST}
      NEXT_PUBLIC_TELEMETRY_ENABLED: ${TELEMETRY_ENABLED}
      EXPERIMENTAL_ENGINE_RUST_VERSION: ${EXPERIMENTAL_ENGINE_RUST_VERSION}
      # configs
      WREN_PRODUCT_VERSION: ${WREN_PRODUCT_VERSION}
    ports:
      # HOST_PORT is the port you want to expose to the host machine
      - ${HOST_PORT}:3000
    volumes:
      - data:/app/data
    networks:
      - wren
    depends_on:
      - wren-ai-service
      - wren-engine

Relevant logs from wrenai-wren-ai-service-1:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 59, in map_aiohttp_exceptions
    yield
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 281, in handle_async_request
    response = await self._make_aiohttp_request(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 248, in _make_aiohttp_request
    response = await client_session.request(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 1510, in __aenter__
    self._resp: _RetType = await self._coro
                           ^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 779, in _request
    resp = await handler(req)
           ^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 757, in _connect_and_send_request
    await resp.start(conn)
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client_reqrep.py", line 539, in start
    message, payload = await protocol.read() # type: ignore[union-attr]
                       ^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/streams.py", line 680, in read
    await self._waiter
aiohttp.client_exceptions.ServerDisconnectedError: Server disconnected

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1529, in request
    response = await self._client.send(
               ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1674, in send
    response = await self._send_handling_auth(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1702, in _send_handling_auth
    response = await self._send_handling_redirects(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1739, in _send_handling_redirects
    response = await self._send_single_request(request)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1776, in _send_single_request
    response = await transport.handle_async_request(request)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 280, in handle_async_request
    with map_aiohttp_exceptions():
  File "/usr/local/lib/python3.12/contextlib.py", line 155, in __exit__
    self.gen.throw(value)
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 73, in map_aiohttp_exceptions
    raise mapped_exc(message) from exc
httpx.ReadError: Server disconnected

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 823, in acompletion
    headers, response = await self.make_openai_chat_completion_request(
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py", line 190, in async_wrapper
    result = await func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 454, in make_openai_chat_completion_request
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 436, in make_openai_chat_completion_request
    await openai_aclient.chat.completions.with_raw_response.create(
  File "/app/.venv/lib/python3.12/site-packages/openai/_legacy_response.py", line 381, in wrapped
    return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py", line 2585, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1794, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1561, in request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/main.py", line 598, in acompletion
    response = await init_response
               ^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 870, in acompletion
    raise OpenAIError(
litellm.llms.openai.common_utils.OpenAIError: Connection error.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 139, in new_fn
    await fn(**fn_kwargs) if asyncio.iscoroutinefunction(fn) else fn(**fn_kwargs)
    ^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 219, in async_wrapper
    self._handle_exception(observation, e)
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 520, in _handle_exception
    raise e
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 217, in async_wrapper
    result = await func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/utils.py", line 167, in wrapper
    result, generator_name = await func(*args, **kwargs)
                             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/pipelines/generation/intent_classification.py", line 296, in classify_intent
    return await generator(prompt=prompt.get("prompt")), generator_name
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/backoff/_async.py", line 151, in retry
    ret = await target(*args, **kwargs)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/providers/llm/litellm.py", line 116, in _run
    completion = await acompletion(
                 ^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/utils.py", line 1638, in wrapper_async
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/utils.py", line 1484, in wrapper_async
    result = await original_function(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/main.py", line 617, in acompletion
    raise exception_type(
          ^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2323, in exception_type
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 522, in exception_type
    raise InternalServerError(
litellm.exceptions.InternalServerError: litellm.InternalServerError: InternalServerError: OpenAIException - Connection error.
-------------------------------------------------------------------
Oh no an error! Need help with Hamilton?
Join our slack and ask for help! https://join.slack.com/t/hamilton-opensource/shared_invite/zt-2niepkra8-DGKGf_tTYhXuJWBTXtIs4g
-------------------------------------------------------------------

E0129 01:43:40.714 12 wren-ai-service:608] ask pipeline - OTHERS: litellm.InternalServerError: InternalServerError: OpenAIException - Connection error.
Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 59, in map_aiohttp_exceptions
    yield
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 281, in handle_async_request
    response = await self._make_aiohttp_request(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 248, in _make_aiohttp_request
    response = await client_session.request(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 1510, in __aenter__
    self._resp: _RetType = await self._coro
                           ^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 779, in _request
    resp = await handler(req)
           ^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client.py", line 757, in _connect_and_send_request
    await resp.start(conn)
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/client_reqrep.py", line 539, in start
    message, payload = await protocol.read() # type: ignore[union-attr]
                       ^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/aiohttp/streams.py", line 680, in read
    await self._waiter
aiohttp.client_exceptions.ServerDisconnectedError: Server disconnected

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1529, in request
    response = await self._client.send(
               ^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1674, in send
    response = await self._send_handling_auth(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1702, in _send_handling_auth
    response = await self._send_handling_redirects(
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1739, in _send_handling_redirects
    response = await self._send_single_request(request)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/httpx/_client.py", line 1776, in _send_single_request
    response = await transport.handle_async_request(request)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 280, in handle_async_request
    with map_aiohttp_exceptions():
  File "/usr/local/lib/python3.12/contextlib.py", line 155, in __exit__
    self.gen.throw(value)
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/custom_httpx/aiohttp_transport.py", line 73, in map_aiohttp_exceptions
    raise mapped_exc(message) from exc
httpx.ReadError: Server disconnected

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 823, in acompletion
    headers, response = await self.make_openai_chat_completion_request(
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/logging_utils.py", line 190, in async_wrapper
    result = await func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 454, in make_openai_chat_completion_request
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 436, in make_openai_chat_completion_request
    await openai_aclient.chat.completions.with_raw_response.create(
  File "/app/.venv/lib/python3.12/site-packages/openai/_legacy_response.py", line 381, in wrapped
    return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/resources/chat/completions/completions.py", line 2585, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1794, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/openai/_base_client.py", line 1561, in request
    raise APIConnectionError(request=request) from err
openai.APIConnectionError: Connection error.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/app/.venv/lib/python3.12/site-packages/litellm/main.py", line 598, in acompletion
    response = await init_response
               ^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/llms/openai/openai.py", line 870, in acompletion
    raise OpenAIError(
litellm.llms.openai.common_utils.OpenAIError: Connection error.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/src/web/v1/services/ask.py", line 234, in ask
    await self._pipelines["intent_classification"].run(
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 219, in async_wrapper
    self._handle_exception(observation, e)
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 520, in _handle_exception
    raise e
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 217, in async_wrapper
    result = await func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/pipelines/generation/intent_classification.py", line 388, in run
    return await self._pipe.execute(
           ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 392, in execute
    raise e
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 383, in execute
    outputs = await self.raw_execute(_final_vars, overrides, display_graph, inputs=inputs)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 343, in raw_execute
    raise e
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 338, in raw_execute
    results = await await_dict_of_tasks(task_dict)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 40, in await_dict_of_tasks
    coroutines_gathered = await asyncio.gather(*coroutines)
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 53, in process_value
    return await val
           ^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 108, in new_fn
    fn_kwargs = await await_dict_of_tasks(task_dict)
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 40, in await_dict_of_tasks
    coroutines_gathered = await asyncio.gather(*coroutines)
                          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 53, in process_value
    return await val
           ^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/hamilton/async_driver.py", line 139, in new_fn
    await fn(**fn_kwargs) if asyncio.iscoroutinefunction(fn) else fn(**fn_kwargs)
    ^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 219, in async_wrapper
    self._handle_exception(observation, e)
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 520, in _handle_exception
    raise e
  File "/app/.venv/lib/python3.12/site-packages/langfuse/decorators/langfuse_decorator.py", line 217, in async_wrapper
    result = await func(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/utils.py", line 167, in wrapper
    result, generator_name = await func(*args, **kwargs)
                             ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/pipelines/generation/intent_classification.py", line 296, in classify_intent
    return await generator(prompt=prompt.get("prompt")), generator_name
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/backoff/_async.py", line 151, in retry
    ret = await target(*args, **kwargs)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/src/providers/llm/litellm.py", line 116, in _run
    completion = await acompletion(
                 ^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/utils.py", line 1638, in wrapper_async
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/utils.py", line 1484, in wrapper_async
    result = await original_function(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/main.py", line 617, in acompletion
    raise exception_type(
          ^^^^^^^^^^^^^^^
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2323, in exception_type
    raise e
  File "/app/.venv/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 522, in exception_type
    raise InternalServerError(
litellm.exceptions.InternalServerError: litellm.InternalServerError: InternalServerError: OpenAIException - Connection error.
INFO: 172.19.0.6:33408 - "GET /v1/asks/c52e5af7-69d2-4414-a42d-2121b5242de2/result HTTP/1.1" 200 OK
