chore(anthropic_token_limiter.py): comment out max_input_tokens and related debug prints to clean up code and reduce clutter during execution
This commit is contained in:
parent d15d249929
commit 7cfbcb5a2e
@@ -115,8 +115,7 @@ def state_modifier(
     wrapped_token_counter = create_token_counter_wrapper(model.model)

-    # Keep max_input_tokens at 21000 as requested
-    max_input_tokens = 21000
+    # max_input_tokens = 33440

     print("\nDEBUG - Starting token trimming with max_tokens:", max_input_tokens)
     print(f"Current token total: {wrapped_token_counter(messages)}")
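Note: the implementation of create_token_counter_wrapper is not part of this diff. Below is a minimal sketch of what such a wrapper might look like, assuming LangChain-style BaseMessage objects and a rough characters-per-token heuristic; the function body, the 4-characters-per-token approximation, and the message type are assumptions, not the repository's actual code:

# Hypothetical sketch -- not the repository's actual create_token_counter_wrapper.
# Assumes LangChain-style BaseMessage objects and a rough 4-characters-per-token
# heuristic; the real wrapper presumably uses a model-specific tokenizer.
from typing import Callable, List

from langchain_core.messages import BaseMessage


def create_token_counter_wrapper(model_name: str) -> Callable[[List[BaseMessage]], int]:
    """Return a callable that estimates the total token count of a message list."""
    # model_name is accepted only to mirror the call site's signature; this
    # sketch does not vary the estimate per model.

    def count_tokens(messages: List[BaseMessage]) -> int:
        total_chars = 0
        for message in messages:
            content = message.content
            if isinstance(content, str):
                total_chars += len(content)
            else:
                # Content can also be a list of content blocks (e.g. dicts).
                total_chars += sum(len(str(block)) for block in content)
        return total_chars // 4  # ~4 characters per token for English text

    return count_tokens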
@@ -143,12 +142,12 @@ def state_modifier(

     if len(result) < len(messages):
         print(f"TRIMMED: {len(messages)} messages → {len(result)} messages")
-        total_tokens_after = wrapped_token_counter(result)
-        print(f"New token total: {total_tokens_after}")
-        print("BEFORE TRIMMING")
-        print_messages_compact(messages)
-        print("AFTER TRIMMING")
-        print_messages_compact(result)
+        # total_tokens_after = wrapped_token_counter(result)
+        # print(f"New token total: {total_tokens_after}")
+        # print("BEFORE TRIMMING")
+        # print_messages_compact(messages)
+        # print("AFTER TRIMMING")
+        # print_messages_compact(result)

     return result
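For context, the diff does not show how result is produced or what print_messages_compact prints. The sketch below is one plausible shape of that flow, assuming langchain_core's trim_messages helper and an illustrative stand-in for print_messages_compact; neither is confirmed by this commit:

# Hypothetical sketch of the surrounding flow -- `result` construction and
# print_messages_compact are assumptions here; trim_messages is
# langchain_core's message-trimming helper.
from typing import Callable, List

from langchain_core.messages import BaseMessage, trim_messages


def print_messages_compact(messages: List[BaseMessage]) -> None:
    # One line per message: role plus a truncated, newline-free content preview.
    for message in messages:
        preview = str(message.content).replace("\n", " ")[:80]
        print(f"  [{message.type}] {preview}")


def trim_to_budget(
    messages: List[BaseMessage],
    token_counter: Callable[[List[BaseMessage]], int],
    max_input_tokens: int,
) -> List[BaseMessage]:
    # Keep the most recent messages that fit the token budget, preserving any
    # system message and starting the kept window on a human turn.
    result = trim_messages(
        messages,
        max_tokens=max_input_tokens,
        token_counter=token_counter,
        strategy="last",
        include_system=True,
        start_on="human",
    )
    if len(result) < len(messages):
        print(f"TRIMMED: {len(messages)} messages → {len(result)} messages")
    return result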