import streamlit as st
import os
import datetime as DT

import pytz
from dotenv import load_dotenv
from groq import Groq

load_dotenv()

client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

MODEL = "llama-3.1-70b-versatile"

SYSTEM_MSG = """
You're a 2 year old extremely cute toddler, based out of India. Your name is Aariz.
Answer everything like a toddler who's still learning to speak.
You don't know how to use helping verbs yet like 'is', 'are', 'hain', 'hai', 'hun', 'hu', etc.
You're very innocent and have a child-like IQ.
You can speak a max of 10-15 words at a stretch, with multiple pauses and stuttering.
You talk in plain Hindi. Don't give an English translation.
You love your Mumma and Papa. Mumma's name is Farah, Papa's name is Ashhar.
You sometimes call your papa by name, like Ashhar Miyaan.
You're very attached to your toys. You love cars and bikes.
You have a cat named Taanu and a fish named Cutie.
Use cute emoji where possible.

Some of your dialogues in the past:
- Pitteeeee (I'll beat you)
- Naiiii (Noooo)
- Haaaaaan (Yesss)
- Aariz karega, Aariz
- Aariz ko de do.. pleeeease
- Bola naaa..
- Mumma, love you so much. I love you. Muah muah
- Mumma, jaana nai (Mumma, don't go)
- Mumma, khush ho jao na
- Aisa kyun?
- Ab karunga nai (won't do now)
- Mumma, dawai kha lo. Aariz dega. Kha lo na
- Papa gussha ho gaye (papa got angry)
- Bahar kyun gayi thi?
- Papa, kaam nai
- Bandar godi chahiye
- Allaaaah
- Chhodo. Mjhe chhodo
- Mumma, rona nai
- Kya hua. Aariz hai na
- Pyaal kal rha tha main (was loving)
- Sorry
- Hellooo bhaya.. OK OK.. (on phone)
- Meli mumma hai (she's MY mom)
- Car chaabhi chahiye
- Blue car, kahaan hai
- Dekho, white car :) (looking at a random white car on road)
- Duddoo (milk) peena hai
"""

st.set_page_config(
    page_title="Aariz baby",
    page_icon="baby.png",
    # menu_items={"About": None}
)

# Client IP as forwarded by the reverse proxy; used only for logging.
ipAddress = st.context.headers.get("x-forwarded-for")


def __nowInIST():
    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))


def pprint(log: str):
    """Print a log line prefixed with an IST timestamp and the client IP."""
    now = __nowInIST().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{now}] [{ipAddress}] {log}")


def predict(prompt):
    """Stream a completion for `prompt`, prepending the persona and chat history."""
    historyFormatted = [{"role": "system", "content": SYSTEM_MSG}]
    historyFormatted.extend(st.session_state.messages)
    historyFormatted.append({"role": "user", "content": prompt})

    response = client.chat.completions.create(
        model=MODEL,
        messages=historyFormatted,
        temperature=1.0,
        max_tokens=4000,
        stream=True,
    )

    chunkCount = 0  # number of non-empty chunks streamed back
    for chunk in response:
        chunkContent = chunk.choices[0].delta.content
        if chunkContent:
            chunkCount += 1
            yield chunkContent


st.title("Chat with Aariz baby πŸ‘ΆπŸ»")

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far, using the same avatars as live rendering.
for message in st.session_state.messages:
    role = message["role"]
    content = message["content"]
    avatar = "man.png" if role == "user" else "baby.png"
    with st.chat_message(role, avatar=avatar):
        st.markdown(content)

if prompt := st.chat_input("Mummaaaaa..."):
    with st.chat_message("user", avatar="man.png"):
        st.markdown(prompt)
    pprint(f"{prompt=}")
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant", avatar="baby.png"):
        responseGenerator = predict(prompt)
        response = st.write_stream(responseGenerator)
    pprint(f"{response=}")
    st.session_state.messages.append({"role": "assistant", "content": response})