import streamlit as st
from streamlit_option_menu import option_menu
from transformers import pipeline
# Conversation is only needed by the commented-out chat() demo below, and it
# has been removed from recent transformers releases; re-enable it on demand:
# from transformers import Conversation

# convo = pipeline(task="conversational", model="microsoft/DialoGPT-medium")
# imgclassifier = pipeline(model="microsoft/beit-base-patch16-224-pt22k-ft22k")
# qnabot = pipeline(task="question-answering", model="distilbert-base-cased-distilled-squad")
# txtgen = pipeline(task="text-generation", model="EleutherAI/gpt-neo-2.7B")
# txtclassifi = pipeline(task="text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
# summarize = pipeline(task="summarization", model="sshleifer/distilbart-cnn-12-6")
# visualqna = pipeline(task="vqa", model="microsoft/DialoGPT-medium")
visualqna = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
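# A minimal caching sketch (assumes Streamlit >= 1.18, which provides
# st.cache_resource): wrapping pipeline creation keeps the ViLT model from
# being reloaded on every script rerun.
# @st.cache_resource
# def get_vqa_pipeline():
#     return pipeline(model="dandelin/vilt-b32-finetuned-vqa")
# visualqna = get_vqa_pipeline()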


def load_image():
    """Render image pickers in the sidebar and return the chosen image URL."""
    with st.sidebar:
        # A manually entered URL takes priority; otherwise fall back to a preset image.
        img = st.text_input("Enter Image URL")
        if not img:
            img = st.selectbox("Select Image", ("https://images.unsplash.com/photo-1593466144596-8abd50ad2c52?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=3434&q=80", "https://images.unsplash.com/photo-1566438480900-0609be27a4be?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D&auto=format&fit=crop&w=3394&q=80"))
        if st.button("Load Image"):
            st.write("Image Loaded!")
            st.image(img)
    return img
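# Hypothetical helper (not wired into load_image above): fetching the URL into
# a PIL image up front would surface network errors early rather than inside
# the pipeline call. Assumes the requests and Pillow packages are installed.
# import requests
# from io import BytesIO
# from PIL import Image
#
# def fetch_image(url):
#     resp = requests.get(url, timeout=10)
#     resp.raise_for_status()  # fail fast on 4xx/5xx responses
#     return Image.open(BytesIO(resp.content))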

# def homepage():
#     st.write("Timeline")
#     # allmessages =[]
#     if "messages" not in st.session_state:
#         st.session_state.messages = []

#     if usrmsg := st.chat_input("Share a thought"):
#         st.session_state.messages.append(usrmsg)
#         with st.chat_message("user"):
#             st.session_state.messages
    
    
# def chat():
#     st.title("Chit-Chatbot")
#     if query := st.chat_input("Enter your message"):
#         uquery = Conversation(query)
#         response = convo(uquery)
#         with st.chat_message("assistant"):
#             st.write(response.generated_responses[-1])
        
    
# def image_classifi():
#     st.title("Image Classification")
#     file = st.text_input("Enter Image URL")
#     output = imgclassifier(file)
#     if st.button("View Results"):
#         st.write(output)
        
    
# def qna_bot():
#     st.title("Q&A-Chatbot")
#     if query := st.chat_input("Enter your message"):
#         response = qnabot(query)
#         with st.chat_message("assistant"):
#             st.write(response)
        
    
# def txt_gen():
#     st.title("Text Generation")
#     if query := st.chat_input("Enter your message"):
#         response = txtgen(query)
#         with st.chat_message("assistant"):
#             st.write(response)
    
# def txt_classifi():
#     st.title("Text Classification")
#     if query := st.chat_input("Enter your message"):
#         response = txtclassifi(query)
#         with st.chat_message("assistant"):
#             st.write(response)
    
# def summary():
#     st.title("Summary")
#     if query := st.chat_input("Enter your message"):
#         response = summarize(query, min_length=5, max_length=20)
#         with st.chat_message("assistant"):
#             st.write(response)

def visual_qna():
    st.title("Visual Q&A")
    img = load_image()
    if img:
        if query := st.chat_input("Enter your message"):
            response = visualqna(question=query, image=img)
            with st.chat_message("assistant"):
                st.write(response)
    else:
        st.warning("Please enter an image URL and click 'Load Image' before asking a question.")




def dashboard():

    with st.sidebar:
        # streamlit-option-menu renders Bootstrap icons, so Bootstrap icon
        # names are used here rather than raw emoji.
        selected = option_menu(None, ['Conversational', "Q&A", "Text Generation", "Text Classification", "Image Classification", "Summarization", "Visual Q&A", "Logout"],
                               icons=['chat-dots', 'question-circle', 'pencil', 'tags', 'image', 'file-text', 'eye', 'box-arrow-right'])
    # if selected == 'Home':
    #     homepage()
    if selected == 'Visual Q&A':
        visual_qna()
    # elif selected == "Image Classification":
    #     image_classifi()
    elif selected == 'Logout':
        st.session_state.user = None
        st.rerun()  # use st.experimental_rerun() on Streamlit < 1.27
    # elif selected == "Invoke Document":
    #     invoke_document()
    # elif selected == "Invoke Audio":
    #     invoke_audio()
    # elif selected == "Invoke Video":
    #     invoke_video()
    # elif selected == "Invoke Image":
    #     invoke_image()
    # elif selected == "Invoke Text":
    #     invoke_text()
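# Hypothetical entry point (assumes a separate login page sets
# st.session_state.user before handing control to this dashboard):
# if __name__ == "__main__":
#     if st.session_state.get("user"):
#         dashboard()
#     else:
#         st.warning("Please log in first.")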