ghmark675
Committed by BaiFu

hotfix(timeout): increase llm call timeout to 1800s

@@ -38,11 +38,11 @@ class LLMClient:
38         self.base_url = base_url
39         self.model_name = model_name
40         self.provider = model_name
41 -      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("INSIGHT_ENGINE_REQUEST_TIMEOUT") or "180"
41 +      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("INSIGHT_ENGINE_REQUEST_TIMEOUT") or "1800"
42         try:
43             self.timeout = float(timeout_fallback)
44         except ValueError:
45 -          self.timeout = 300.0
45 +          self.timeout = 1800.0
46
47         client_kwargs: Dict[str, Any] = {
48             "api_key": api_key,
@@ -41,11 +41,11 @@ class LLMClient:
41         self.base_url = base_url
42         self.model_name = model_name
43         self.provider = model_name
44 -      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("MEDIA_ENGINE_REQUEST_TIMEOUT") or "180"
44 +      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("MEDIA_ENGINE_REQUEST_TIMEOUT") or "1800"
45         try:
46             self.timeout = float(timeout_fallback)
47         except ValueError:
48 -          self.timeout = 300.0
48 +          self.timeout = 1800.0
49
50         client_kwargs: Dict[str, Any] = {
51             "api_key": api_key,
@@ -38,11 +38,11 @@ class LLMClient:
38         self.base_url = base_url
39         self.model_name = model_name
40         self.provider = model_name
41 -      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("QUERY_ENGINE_REQUEST_TIMEOUT") or "180"
41 +      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("QUERY_ENGINE_REQUEST_TIMEOUT") or "1800"
42         try:
43             self.timeout = float(timeout_fallback)
44         except ValueError:
45 -          self.timeout = 180.0
45 +          self.timeout = 1800.0
46
47         client_kwargs: Dict[str, Any] = {
48             "api_key": api_key,
@@ -38,11 +38,11 @@ class LLMClient:
38         self.base_url = base_url
39         self.model_name = model_name
40         self.provider = model_name
41 -      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("REPORT_ENGINE_REQUEST_TIMEOUT") or "180"
41 +      timeout_fallback = os.getenv("LLM_REQUEST_TIMEOUT") or os.getenv("REPORT_ENGINE_REQUEST_TIMEOUT") or "1800"
42         try:
43             self.timeout = float(timeout_fallback)
44         except ValueError:
45 -          self.timeout = 300.0
45 +          self.timeout = 1800.0
46
47         client_kwargs: Dict[str, Any] = {
48             "api_key": api_key,