import os
from time import sleep

import streamlit as st
from PIL import Image

from colorprinter.print_color import *
from _base_class import StreamlitBaseClass


class SettingsPage(StreamlitBaseClass):
    def __init__(self, username: str):
        super().__init__(username=username)

    def run(self):
        self.update_current_page("Settings")
        self.set_profile_picture()
        self.use_reasoning_model()

    def set_profile_picture(self):
        """Let the user upload a profile picture, resize it, and store its path in settings."""
        st.markdown("Profile picture")
        profile_picture = st.file_uploader(
            "Upload profile picture", type=["png", "jpg", "jpeg"]
        )
        if profile_picture:
            # Downscale the uploaded image so it fits within 64x64 pixels.
            img = Image.open(profile_picture)
            img.thumbnail((64, 64))

            # Make sure the user's data directory exists before saving.
            user_dir = f"user_data/{st.session_state['username']}"
            os.makedirs(user_dir, exist_ok=True)
            img_path = f"{user_dir}/profile_picture.png"
            img.save(img_path)

            self.update_settings("avatar", img_path)
            st.success("Profile picture uploaded")
            sleep(1)

    def use_reasoning_model(self):
        """
        Display a checkbox in the Streamlit interface to enable or disable the
        reasoning model for generating responses in chats.

        Retrieves the current settings and initializes the "use_reasoning_model"
        key to False if it does not exist. Then displays a markdown heading and a
        checkbox for the user to toggle reasoning-model usage, and saves the
        updated value back to the settings.

        Returns:
            None
        """
        settings = self.get_settings()
        if "use_reasoning_model" not in settings:
            settings["use_reasoning_model"] = False

        st.markdown("Use Reasoning Model")
        use_reasoning_model = st.checkbox(
            "Use Reasoning Model",
            value=settings["use_reasoning_model"],
            help=(
                "Use the reasoning model to generate responses in chats. "
                "This may take longer to process."
            ),
        )
        self.update_settings("use_reasoning_model", use_reasoning_model)
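

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how this page might be run directly, assuming a login
# step has already placed the username into `st.session_state`. The
# "demo_user" fallback below is hypothetical and only for illustration; in the
# actual app the page may instead be invoked by a multipage router.
if __name__ == "__main__":
    username = st.session_state.get("username", "demo_user")
    SettingsPage(username=username).run()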