JatsTheAIGen committed on
Commit
bd329bc
·
1 Parent(s): f608c70

Fix indentation issues in prepare_context_for_llm and _truncate_to_tokens methods

Browse files
Files changed (1) hide show
  1. src/llm_router.py +4 -4
src/llm_router.py CHANGED
@@ -363,7 +363,7 @@ class LLMRouter:
363
  continue
364
 
365
  # Estimate tokens (simple: 1 token ≈ 4 chars)
366
- tokens = len(content) // 4
367
 
368
  if total_tokens + tokens <= max_tokens:
369
  formatted_context.append(f"=== {element.upper()} ===\n{content}")
@@ -382,7 +382,7 @@ class LLMRouter:
382
  def _truncate_to_tokens(self, content: str, max_tokens: int) -> str:
383
  """Truncate content to fit within token limit"""
384
  # Simple character-based truncation (1 token ≈ 4 chars)
385
- max_chars = max_tokens * 4
386
- if len(content) <= max_chars:
387
- return content
388
  return content[:max_chars - 3] + "..."
 
363
  continue
364
 
365
  # Estimate tokens (simple: 1 token ≈ 4 chars)
366
+ tokens = len(content) // 4
367
 
368
  if total_tokens + tokens <= max_tokens:
369
  formatted_context.append(f"=== {element.upper()} ===\n{content}")
 
382
  def _truncate_to_tokens(self, content: str, max_tokens: int) -> str:
383
  """Truncate content to fit within token limit"""
384
  # Simple character-based truncation (1 token ≈ 4 chars)
385
+ max_chars = max_tokens * 4
386
+ if len(content) <= max_chars:
387
+ return content
388
  return content[:max_chars - 3] + "..."