import os
from typing import Iterator

import gradio as gr
import requests

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
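# MAX_INPUT_TOKEN_LENGTH is read from the environment so a deployment can cap
# prompt length; note that this script does not currently enforce it before
# calling the backend.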

DESCRIPTION = """\
# MIQU-70B
"""

LICENSE = """Pirate license
"""


def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})
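    # At this point the conversation is a list of role/content dicts, e.g.:
    #   [{"role": "system", "content": system_prompt},
    #    {"role": "user", "content": "Hi"},
    #    {"role": "assistant", "content": "Hello!"},
    #    {"role": "user", "content": message}]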


    # Query the remote Ollama /api/chat endpoint (non-streaming). The payload
    # must go in the JSON body, and the UI's sampling settings are forwarded
    # through Ollama's "options" field.
    r = requests.post(
        "https://social-warthog-fleet.ngrok-free.app/api/chat",
        json={
            "model": "miqu",
            "messages": conversation,
            "stream": False,
            "options": {
                "num_predict": max_new_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "repeat_penalty": repetition_penalty,
            },
        },
    )
    r.raise_for_status()

    yield r.json()["message"]["content"]


chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    stop_btn=None,
    examples=[
        ["Hello there! How are you doing?"],
        ["Can you explain briefly to me what is the Python programming language?"],
        ["Explain the plot of Cinderella in a sentence."],
        ["How many hours does it take a man to eat a Helicopter?"],
        ["Write a 100-word article on 'Benefits of Open-Source in AI research'"],
    ],
)
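# Each additional_inputs component maps positionally onto generate()'s extra
# parameters (system_prompt, max_new_tokens, temperature, top_p, top_k,
# repetition_penalty), in the order listed above.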

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
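
# To bind on all interfaces or get a public share link (standard Gradio
# launch options), one could instead call, e.g.:
#     demo.queue(max_size=20).launch(server_name="0.0.0.0", share=True)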