From aba0951534191ff35ee8b83ee58169e8ee4c6998 Mon Sep 17 00:00:00 2001
From: lasseedfast
Date: Wed, 21 May 2025 16:19:10 +0200
Subject: [PATCH] Refactor package structure: rename to "llm_client", update
 imports, and enhance setup configuration

---
 README.md        | 10 +++++-----
 __init__.py      |  7 +++++++
 _llm/__init__.py |  9 ++++++---
 _llm/llm.py      |  3 ---
 llm_client.py    |  7 +++++++
 setup.py         |  9 +++++++--
 6 files changed, 32 insertions(+), 13 deletions(-)
 create mode 100644 __init__.py
 create mode 100644 llm_client.py

diff --git a/README.md b/README.md
index 91b815c..61316e2 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# _llm
+# llm_client
 
 A Python package for interacting with LLM models through Ollama, supporting both remote API and local Ollama instances.
 
@@ -47,7 +47,7 @@ These can be set in a `.env` file in your project directory or in the ArangoDB e
 ## Basic Usage
 
 ```python
-from _llm import LLM
+from llm_client import LLM
 
 # Initialize the LLM
 llm = LLM()
@@ -64,7 +64,7 @@ print(result.content)
 ### Working with Images
 
 ```python
-from _llm import LLM
+from llm_client import LLM
 
 llm = LLM()
 response = llm.generate(
@@ -77,7 +77,7 @@ response = llm.generate(
 ### Streaming Responses
 
 ```python
-from _llm import LLM
+from llm_client import LLM
 
 llm = LLM()
 for chunk_type, chunk in llm.generate(
@@ -91,7 +91,7 @@ for chunk_type, chunk in llm.generate(
 
 ```python
 import asyncio
-from _llm import LLM
+from llm_client import LLM
 
 async def main():
     llm = LLM()
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..bdcc984
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,7 @@
+"""
+llm_client: A Python package for interacting with LLM models through Ollama.
+"""
+
+from _llm.llm import LLM
+
+__all__ = ["LLM"]
\ No newline at end of file
diff --git a/_llm/__init__.py b/_llm/__init__.py
index 18eb6ba..bdcc984 100644
--- a/_llm/__init__.py
+++ b/_llm/__init__.py
@@ -1,4 +1,7 @@
-from .llm import LLM, remove_thinking
+"""
+llm_client: A Python package for interacting with LLM models through Ollama.
+"""
 
-__version__ = "0.1.0"
-__all__ = ["LLM", "remove_thinking"]
\ No newline at end of file
+from _llm.llm import LLM
+
+__all__ = ["LLM"]
\ No newline at end of file
diff --git a/_llm/llm.py b/_llm/llm.py
index 349993f..482fe2a 100644
--- a/_llm/llm.py
+++ b/_llm/llm.py
@@ -19,7 +19,6 @@ from colorprinter.print_color import *
 
 env_manager.set_env()
 
-print(os.environ)
 
 tokenizer = tiktoken.get_encoding("cl100k_base")
 
@@ -306,7 +305,6 @@ class LLM:
         Generate a response based on the provided query and context.
         """
         model = self._prepare_messages_and_model(query, user_input, context, messages, images, model)
-        print(f"[generate] model after _prepare_messages_and_model: {model}")
         temperature = temperature if temperature else self.options["temperature"]
         if not force_local:
             try:
@@ -393,7 +391,6 @@ class LLM:
                 keep_alive=3600 * 24 * 7,
             )
             summary = response.message.content.strip()
-            print_blue("Summary:", summary)
             return summary
         except ResponseError as e:
             print_red("Error generating summary:", e)
diff --git a/llm_client.py b/llm_client.py
new file mode 100644
index 0000000..342d7d4
--- /dev/null
+++ b/llm_client.py
@@ -0,0 +1,7 @@
+"""
+llm_client package entry point to simplify imports
+"""
+
+from _llm import LLM
+
+__all__ = ["LLM"]
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 441c6c9..d9e3664 100644
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,14 @@
+"""
+llm_client package setuptools configuration.
+""" + from setuptools import setup, find_packages setup( - name="llm_client", # Changed from "_llm" to "llm_client" + name="llm_client", version="0.1.0", - packages=find_packages(), + packages=["_llm"], + py_modules=["llm_client"], install_requires=[ "requests", "tiktoken",