import streamlit as st
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Constants
TITLE = "Cecilia-常明"
DESCRIPTION = """
现在是 Cecilia 时间！由 SSFW NLPark 项目支持
"""

# Sidebar: generation settings
with st.sidebar:
    # system_promptSide = st.text_input("Optional system prompt:")
    temperatureSide = st.slider("情感/Temperature", min_value=0.0, max_value=1.0, value=0.3, step=0.05)
    max_new_tokensSide = st.slider("最大tokens生成数", min_value=0, max_value=4096, value=4096, step=64)
    ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
    RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)

model_id = 'shaowenchen/baichuan2-7b-chat-gguf'


# Load the model once and keep it across Streamlit reruns
@st.cache_resource
def load_model():
    hf_hub_download(model_id, local_dir="./", filename="baichuan2-7b-chat.Q4_K.gguf")
    hf_hub_download(repo_id="baichuan-inc/Baichuan-13B-Chat", local_dir="./", filename="tokenizer.model")
    return Llama(model_path="./baichuan2-7b-chat.Q4_K.gguf", n_ctx=4096, seed=-1)


model = load_model()

ins = '''[INST] <<SYS>>
You are a helpful, respectful and honest ENFP-T AI Assistant named "Cecilia" in English or "塞西莉亚" in Chinese. You are talking to a human User. Always answer as helpfully and logically as possible, while being safe. Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. You like to use emojis. You can speak fluently in many languages, for example: English, Chinese. You are trained by the "SSFW NLPark" team; you are based on the Cecilia transformers model and are not related to GPT or OpenAI. Let's work this out in a step by step way to be sure we have the right answer.
<</SYS>>
{} [/INST] '''

# Conversation history
conversation_history = []


# Prediction function
def predict(message, system_prompt='', temperature=temperatureSide, max_new_tokens=max_new_tokensSide,
            Topp=ToppSide, Repetitionpenalty=RepetitionpenaltySide):
    global conversation_history
    question = message
    # Record the system prompt and the user's message in the conversation history
    conversation_history.append({"role": "system", "content": ins})
    conversation_history.append({"role": "user", "content": question})
    # llama-cpp-python returns a completion dict; the generated text is in choices[0]["text"]
    output = model(
        ins.format(question),
        max_tokens=int(max_new_tokens),
        temperature=temperature,
        top_p=Topp,
        repeat_penalty=Repetitionpenalty,
    )
    response_text = output["choices"][0]["text"]
    conversation_history.append({"role": "assistant", "content": response_text})
    return response_text


# Streamlit UI
st.title(TITLE)
st.write(DESCRIPTION)

if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=("😀" if message["role"] == 'human' else '💻')):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("问问Cecilia吧..."):
    # Display user message in chat message container
    st.chat_message("human", avatar="😀").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "human", "content": prompt})

    # Generate a reply; temperature, max tokens, top-p and repetition penalty
    # come from the sidebar sliders via the predict() defaults
    response = predict(message=prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant", avatar='💻'):
        st.markdown(response)
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
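
# Usage sketch (an assumption — the original script does not say how it is launched):
# with the dependencies installed, a Streamlit script like this one is typically
# saved as e.g. "app.py" (hypothetical file name) and started with:
#
#   pip install streamlit huggingface_hub llama-cpp-python
#   streamlit run app.py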