Refactor package structure: rename to "llm_client", update imports, and enhance setup configuration

legacy
lasseedfast 9 months ago
parent c998de1077
commit aba0951534
6 changed files:

- README.md (10 lines changed)
- __init__.py (7 lines changed)
- _llm/__init__.py (9 lines changed)
- _llm/llm.py (3 lines changed)
- llm_client.py (7 lines changed)
- setup.py (9 lines changed)

README.md

@@ -1,4 +1,4 @@
-# _llm
+# llm_client
 A Python package for interacting with LLM models through Ollama, supporting both remote API and local Ollama instances.
@@ -47,7 +47,7 @@ These can be set in a `.env` file in your project directory or in the ArangoDB e
 ## Basic Usage
 ```python
-from _llm import LLM
+from llm_client import LLM
 # Initialize the LLM
 llm = LLM()
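For orientation, here is a hedged sketch of the Basic Usage snippet as it reads after the rename; the prompt text and the exact `query=` keyword are illustrative assumptions, while the import path, the `LLM()` constructor, and the `result.content` access come from the README context shown in this diff.

```python
# Sketch of the renamed Basic Usage flow (query= keyword and prompt are assumed).
from llm_client import LLM

# Initialize the LLM
llm = LLM()

# Ask for a completion; result.content holds the reply per the README context.
result = llm.generate(query="Give a one-sentence summary of what Ollama does.")
print(result.content)
```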
@@ -64,7 +64,7 @@ print(result.content)
 ### Working with Images
 ```python
-from _llm import LLM
+from llm_client import LLM
 llm = LLM()
 response = llm.generate(
@@ -77,7 +77,7 @@ response = llm.generate(
 ### Streaming Responses
 ```python
-from _llm import LLM
+from llm_client import LLM
 llm = LLM()
 for chunk_type, chunk in llm.generate(
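The streaming example keeps the same shape apart from the import. A rough sketch follows; the `stream=True` flag and the `"content"` chunk label are assumptions, and only the `(chunk_type, chunk)` tuple unpacking is visible in the diff context.

```python
# Rough sketch of streaming consumption after the rename.
# stream=True and the "content" label are assumed, not shown in this diff.
from llm_client import LLM

llm = LLM()
for chunk_type, chunk in llm.generate(
    query="Stream a short answer about local inference.",  # illustrative prompt
    stream=True,  # assumed keyword for enabling streaming
):
    if chunk_type == "content":  # assumed label for text chunks
        print(chunk, end="", flush=True)
```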
@@ -91,7 +91,7 @@ for chunk_type, chunk in llm.generate(
 ```python
 import asyncio
-from _llm import LLM
+from llm_client import LLM
 async def main():
     llm = LLM()

__init__.py

@@ -0,0 +1,7 @@
+"""
+llm_client: A Python package for interacting with LLM models through Ollama.
+"""
+from _llm.llm import LLM
+__all__ = ["LLM"]

_llm/__init__.py

@@ -1,4 +1,7 @@
-from .llm import LLM, remove_thinking
-__version__ = "0.1.0"
-__all__ = ["LLM", "remove_thinking"]
+"""
+llm_client: A Python package for interacting with LLM models through Ollama.
+"""
+from _llm.llm import LLM
+__all__ = ["LLM"]

_llm/llm.py

@@ -19,7 +19,6 @@ from colorprinter.print_color import *
 env_manager.set_env()
-print(os.environ)
 tokenizer = tiktoken.get_encoding("cl100k_base")
@@ -306,7 +305,6 @@ class LLM:
         Generate a response based on the provided query and context.
         """
         model = self._prepare_messages_and_model(query, user_input, context, messages, images, model)
-        print(f"[generate] model after _prepare_messages_and_model: {model}")
         temperature = temperature if temperature else self.options["temperature"]
         if not force_local:
             try:
@@ -393,7 +391,6 @@ class LLM:
                 keep_alive=3600 * 24 * 7,
             )
             summary = response.message.content.strip()
-            print_blue("Summary:", summary)
             return summary
         except ResponseError as e:
             print_red("Error generating summary:", e)

llm_client.py

@@ -0,0 +1,7 @@
+"""
+llm_client package entry point to simplify imports
+"""
+from _llm import LLM
+__all__ = ["LLM"]

setup.py

@@ -1,9 +1,14 @@
+"""
+llm_client package setuptools configuration.
+"""
 from setuptools import setup, find_packages
 setup(
-    name="llm_client",  # Changed from "_llm" to "llm_client"
+    name="llm_client",
     version="0.1.0",
-    packages=find_packages(),
+    packages=["_llm"],
+    py_modules=["llm_client"],
     install_requires=[
         "requests",
         "tiktoken",
