#!/usr/bin/env python3
"""View Latest LLM Server Results.

Displays the latest document summaries generated by the LLM server directly
in the terminal, providing a quick way to check results without having to
open a web browser.

Usage:
    python view_latest_results.py [--raw] [--json]

Options:
    --raw   Display the raw result data
    --json  Format the output as JSON
"""
import argparse
import json
import os


def load_latest_result():
    """Load the latest result from the JSON file."""
    # The LLM server is expected to write its most recent output next to this script.
    latest_result_file = os.path.join(os.path.dirname(__file__), "latest_summary_result.json")
    try:
        if os.path.exists(latest_result_file):
            with open(latest_result_file, 'r') as f:
                return json.load(f)
        else:
            print(f"No results file found at {latest_result_file}")
            return None
    except Exception as e:
        print(f"Error loading results: {e}")
        return None
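

# For reference, a sketch of the JSON structure this script expects, inferred
# from the fields read by display_formatted below. Values are illustrative and
# the real file may carry additional keys:
#
# {
#     "_id": "doc-123",
#     "summary": {
#         "text_sum": "Overall document summary...",
#         "meta": {"model": "some-model-name", "temperature": 0.2}
#     },
#     "chunks": [
#         {
#             "summary": "Summary of this chunk...",
#             "tags": ["tag-a", "tag-b"],
#             "references": ["reference 1"]
#         }
#     ]
# }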


def display_raw(result):
    """Display the raw result data as pretty-printed JSON."""
    print(json.dumps(result, indent=2))


def display_formatted(result):
    """Display the result in a nicely formatted way."""
    if not result:
        print("No results available")
        return

    print("\n" + "=" * 80)
    print(f"DOCUMENT: {result.get('_id', 'Unknown')}")
    print("=" * 80)

    # Document summary
    summary = result.get("summary", {}).get("text_sum", "No summary available")
    print("\n📄 DOCUMENT SUMMARY")
    print("-" * 80)
    print(summary)

    # Model info if available
    if "summary" in result and "meta" in result["summary"]:
        meta = result["summary"]["meta"]
        model = meta.get("model", "Unknown")
        temp = meta.get("temperature", "Unknown")
        print(f"\nGenerated using: {model} (temperature: {temp})")

    # Display chunks that have been summarized so far
    chunks = result.get("chunks", [])
    if chunks:
        summarized_chunks = [chunk for chunk in chunks if "summary" in chunk]
        print(f"\n🧩 CHUNK SUMMARIES ({len(summarized_chunks)}/{len(chunks)} chunks processed)")
        for i, chunk in enumerate(summarized_chunks):
            print("\n" + "-" * 80)
            print(f"Chunk {i+1}:")
            print("-" * 80)
            print(chunk["summary"])

            # Display tags
            if "tags" in chunk and chunk["tags"]:
                print("\nTags:", ", ".join(chunk["tags"]))

            # Display references
            if "references" in chunk and chunk["references"]:
                print("\nReferences:")
                for ref in chunk["references"]:
                    print(f"- {ref}")

    print("\n" + "=" * 80)


def main():
    parser = argparse.ArgumentParser(description='View latest LLM server results')
    parser.add_argument('--raw', action='store_true', help='Display raw result data')
    parser.add_argument('--json', action='store_true', help='Format output as JSON')
    args = parser.parse_args()

    result = load_latest_result()
    if not result:
        print("No results available")
        return

    # Both flags currently route to the same pretty-printed JSON output.
    if args.raw or args.json:
        display_raw(result)
    else:
        display_formatted(result)


if __name__ == "__main__":
    main()
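
# Example invocations (assuming latest_summary_result.json sits beside this script):
#
#   python view_latest_results.py          # formatted, human-readable view
#   python view_latest_results.py --raw    # pretty-printed JSON
#   python view_latest_results.py --json   # currently identical to --raw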