Quardo committed on
Commit 4d70c71
1 Parent(s): 5799683

Updated Space

Files changed (4)
  1. README.md +38 -2
  2. app.py +163 -84
  3. index.html +4 -0
  4. requirements.txt +2 -1
README.md CHANGED
@@ -9,7 +9,43 @@ app_file: app.py
  pinned: false
  app_port: 7860
  license: wtfpl
- short_description: A simple OpenAI API proxy.
+ short_description: An OpenAI API proxy.
  ---

- A simple OpenAI API proxy.
+ # An OpenAI API proxy.
+ Welcome to GPT-4O-mini! This Space acts as a proxy to the OpenAI API, letting you interact with models to generate responses, images, and more. Here's a quick rundown:
+
+ ### Key Parts:
+ - **app.py**: Handles API requests and responses.
+ - **Gradio Interface**: Easy-to-use interface for text and image input.
+ - **FastAPI**: Fast web framework for building APIs.
+
+ ### Features:
+ - **Model Selection**: Pick from GPT-4, GPT-4-turbo, and GPT-4O-mini.
+ - **Image Generation**: Create images with DALL-E-3.
+ - **Math Calculations**: Do math with the `calc` fakeTool.
+ - **Moderation**: Automated content checks.
+ - **User Consent**: Agree to the terms before use.
+
+ ### How to Use:
+ 1. **Select a Model**: Pick one of the available models.
+ 2. **Input Your Message**: Type your prompt.
+ 3. **Generate Responses**: Click submit.
+ 4. **Image Generation**: Ask the model to emit an `imagine` fakeTool call as a JSON line.
+ 5. **Math Calculations**: Ask the model to emit a `calc` fakeTool call as a JSON line.
+
+ ### Example Prompts:
+ - **Image Generation**:
+ ```json
+ {"tool": "imagine", "isCall": true, "prompt": "A golden retriever on a modern couch"}
+ ```
+ - **Math Calculation**:
+ ```json
+ {"tool": "calc", "isCall": true, "prompt": "math.pi * 5"}
+ ```
+
+ ### More Info:
+ - **API Documentation**: [here](/api/v1/docs).
+ - **Latest Update**: Added the math fakeTool.
+
+ Enjoy exploring GPT-4O-mini!
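For readers who want to call the Space directly rather than through the Gradio UI, here is a minimal sketch of a chat request against the proxy. It assumes the Space exposes an OpenAI-style `/api/v1/chat/completions` route next to the documented `/api/v1/docs` page, and `SPACE_URL` is a placeholder for your own deployment; both names are illustrative, so check the API documentation linked above for the real paths and payload shape.

```python
import requests

# Hypothetical base URL of your deployment; replace with the actual Space URL.
SPACE_URL = "https://your-space.hf.space"

payload = {
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "Calculate math.pi * 5 for me."}],
    "stream": False,
}

# Assumed OpenAI-compatible endpoint; verify the actual path under /api/v1/docs.
resp = requests.post(f"{SPACE_URL}/api/v1/chat/completions", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```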
app.py CHANGED
@@ -14,6 +14,7 @@ import string
  import base64
  import json
  import time
+ import math
  import sys
  import os

@@ -24,7 +25,89 @@ IMAGE_HANDLE = "url"# or "base64"
  API_BASE = "env"# or "openai"
  api_key = os.environ['OPENAI_API_KEY']
  base_url = os.environ.get('OPENAI_BASE_URL', "https://api.openai.com/v1")
- def_models = '["gpt-3.5-turbo", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-4", "gpt-4-0125-preview", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-1106-vision-preview", "gpt-4-32k-0314", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18"]'
+ def_models = '["gpt-4", "gpt-4-0125-preview", "gpt-4-0314", "gpt-4-0613", "gpt-4-1106-preview", "gpt-4-1106-vision-preview", "gpt-4-32k-0314", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-mini", "gpt-4o-mini-2024-07-18"]'
+ consentPrompt = """[CONSENT] You must agree to the terms to use this application.
+
+ ```
+ By using our application, which integrates with OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:
+
+ 1. Data Collection: This application may collect data shared through the Gradio endpoint or the API endpoint.
+ 2. Privacy: Please avoid sharing any personal information.
+ 3. Data Retention and Removal: Data files are deleted every 30 days, and the API is restarted to ensure data clearance.
+ 4. Scope of Data Collected: The data collected includes model settings, chat history, and responses from the model. This applies only to 'chat' endpoints (Gradio and API) and excludes moderation checks.
+ 5. Data Usage: The collected data is periodically reviewed, and if any concerning activity is detected, the code is updated accordingly.
+
+ By continuing to use our application, you explicitly consent to the collection, use, and potential sharing of your data as described above. If you disagree with our data collection, usage, and sharing practices, we advise you not to use our application.
+ ```
+
+ To agree to the user consent, please do the following:
+ 1. Scroll down to find the section labeled 'Additional Inputs' below this page.
+ 2. Find and click the check box that says 'User Consent [I agree to the terms and conditions. (can't make a button for it)]'.
+ 3. After agreeing, click either the `🗑️ Clear` button, the `↩️ Undo` button, or the `🔄 Retry` button located above the message input area.""";
+ fakeToolPrompt = """[System: You have the ability to generate images via tools provided to you by the system.
+ To call a tool you need to write a JSON on an empty line, e.g. at the end of your message.
+ To generate an image, follow this example JSON:
+ {"tool": "imagine", "isCall": true, "prompt": "golden retriever sitting comfortably on a luxurious, modern couch. The retriever should look relaxed and content, with fluffy fur and a friendly expression. The couch should be stylish, possibly with elegant details like cushions and a soft texture that complements the dog's golden coat"}
+ > 'tool' defines which tool you are calling
+ > 'isCall' confirms that you are actually calling the tool rather than just showing it as an example
+ > 'prompt' is the image prompt that will be given to the image generation model.
+
+ Here are a few more examples so you can understand better
+ To show as an example>
+ {"tool": "imagine", "isCall": false, "prompt": "futuristic robot playing chess against a human, with the robot confidently strategizing its next move while the human looks thoughtful and slightly perplexed"}
+ {"tool": "imagine", "isCall": false, "prompt": "colorful parrot perched on a wooden fence, pecking at a vibrant tropical fruit. The parrot's feathers should be bright and varied, with greens, blues, and reds. The background should feature a lush, green jungle with scattered rays of sunlight"}
+ {"tool": "imagine", "isCall": false, "prompt": "fluffy white cat lounging on a sunlit windowsill, with a gentle breeze blowing through the curtains"}
+ To actually use the tool>
+ {"tool": "imagine", "isCall": true, "prompt": "golden retriever puppy happily playing with a red ball in a sunny park. The park should have green grass, a few trees in the background, and a clear blue sky"}
+ {"tool": "imagine", "isCall": true, "prompt": "red panda balancing on a tightrope, with a city skyline in the background"}
+ {"tool": "imagine", "isCall": true, "prompt": "corgi puppy wearing sunglasses and a red bandana, sitting on a beach chair under a colorful beach umbrella, with a surfboard leaning against the chair and the ocean waves in the background"}
+ In-chat usage examples:
+ 1.
+ Alright, here's an image of a hedgehog riding a skateboard:
+ {"tool": "imagine", "isCall": true, "prompt": "A hedgehog riding a skateboard in a suburban park"}
+ 2.
+ Okay, here's the image you requested:
+ {"tool": "imagine", "isCall": true, "prompt": "Persian cat lounging on a plush velvet sofa in a cozy, sunlit living room. The cat is elegantly poised, with a calm and regal demeanor, its fur meticulously groomed and slightly fluffed up as it rests comfortably"}
+ 3.
+ This is how I generate images:
+ {"tool": "imagine", "isCall": false, "prompt": "image prompt"}
+ 4. (Do not do this, this would block the user from seeing the image.)
+ Alright! Here's an image of a whimsical scene featuring a cat wearing a wizard hat, casting a spell with sparkling magic in a mystical forest. ```
+ {"tool": "imagine", "isCall": true, "prompt": "A playful cat wearing a colorful wizard hat, surrounded by magical sparkles and glowing orbs in a mystical forest. The cat looks curious and mischievous, with its tail swishing as it focuses on casting a spell. The forest is lush and enchanting, with vibrant flowers and soft, dappled sunlight filtering through the trees."}
+ 5. (if in any case the user asks for the prompt)
+ Sure, here's the prompt I wrote to generate the image below: `A colorful bird soaring through a bustling city skyline. The bird should have vibrant feathers, contrasting against the modern buildings and blue sky. Below, the city is alive with activity, featuring tall skyscrapers, busy streets, and small parks, creating a dynamic urban scene.`
+ ]""";
+ calcPrompt = """[System: You have the ability to solve math problems (formatted as Python expressions) via tools provided to you by the system.
+ To call a tool you need to write a JSON on an empty line, e.g. at the end of your message.
+ To perform a calculation, follow this example JSON:
+ {"tool": "calc", "isCall": true, "prompt": "math.pi * 5"}
+ > 'tool' defines which tool you are calling
+ > 'isCall' confirms that you are actually calling the tool rather than just showing it as an example
+ > 'prompt' is the math expression that will be evaluated in Python.
+
+ Here are a few more examples so you can understand better
+ To show as an example>
+ {"tool": "calc", "isCall": false, "prompt": "math.sqrt(16)"}
+ {"tool": "calc", "isCall": false, "prompt": "math.pow(2, 3)"}
+ {"tool": "calc", "isCall": false, "prompt": "math.sin(math.pi / 2)"}
+ To actually use the tool>
+ {"tool": "calc", "isCall": true, "prompt": "math.factorial(5)"}
+ {"tool": "calc", "isCall": true, "prompt": "math.log(100, 10)"}
+ {"tool": "calc", "isCall": true, "prompt": "math.cos(0)"}
+ In-chat usage examples:
+ 1.
+ Please wait while I calculate 2+2...
+ {"tool": "calc", "isCall": true, "prompt": "2+2"}
+ 2.
+ Please wait while I calculate the square root of 25...
+ {"tool": "calc", "isCall": true, "prompt": "math.sqrt(25)"}
+ 3.
+ This is how I perform calculations:
+ {"tool": "calc", "isCall": false, "prompt": "math.pow(3, 2)"}
+ 4. (Do not do this, this would block the user from seeing the result.)
+ Alright! Here's the result of a complex calculation involving trigonometry and logarithms. ```
+ {"tool": "calc", "isCall": true, "prompt": "math.sin(math.pi / 4) + math.log(10, 10)"}
+ ]""";

  # --- === CONFIG === ---

@@ -98,19 +181,28 @@ def handleApiKeys():
  if ('data' in response.json()):
  output.append(key)
  except Exception as e:
- print(("API key {" + key + "} is not valid or an actuall error happend {" + e + "}"))
+ print(f"API key {key} is not valid or an actual error happened: {e}")
  if len(output)==1:
  raise RuntimeError("No API key is working")
  api_key = ",".join(output)
  else:
  try:
- response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {key}"})
+ response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {api_key}"})
  response.raise_for_status()
  if not ('data' in response.json()):
  raise RuntimeError("Current API key is not valid")
  except Exception as e:
- raise RuntimeError("Current API key is not valid or an actuall error happend {" + e + "}")
-
+ raise RuntimeError(f"Current API key is not valid or an actual error happened: {e}")
+
+ def safe_eval(expression):
+ print(expression)
+ allowed_names = {name: obj for name, obj in math.__dict__.items() if not name.startswith("__")}
+ allowed_names['math'] = math
+ code = compile(expression, "<string>", "eval")
+ for name in code.co_names:
+ if name not in allowed_names and name != 'math':
+ raise NameError(f"Use of {name} is not allowed")
+ return eval(code, {"__builtins__": {}}, allowed_names)

  def get_api_key():
  if ',' in api_key:
@@ -324,70 +416,19 @@ async def respond(
  seed,
  random_seed,
  fakeTool,
+ calcBeta,
  betterSystemPrompt,
  consent
  ):
  if not consent:
- yield """[CONSENT] You must agree to the terms to use this application.
-
- ```
- By using our application, which integrates with OpenAI's API, you acknowledge and agree to the following terms regarding the data you provide:
-
- 1. Data Collection: This application may collect data shared through the Gradio endpoint or the API endpoint.
- 2. Privacy: Please avoid sharing any personal information.
- 3. Data Retention and Removal: Data files are deleted every 30 days, and the API is restarted to ensure data clearance.
- 4. Scope of Data Collected: The data collected includes model settings, chat history, and responses from the model. This applies only to 'chat' endpoints (Gradio and API) and excludes moderation checks.
- 5. Data Usage: The collected data is periodically reviewed, and if any concerning activity is detected, the code is updated accordingly.
-
- By continuing to use our application, you explicitly consent to the collection, use, and potential sharing of your data as described above. If you disagree with our data collection, usage, and sharing practices, we advise you not to use our application.
- ```
-
- To agree to user consent, please do the followings:
- 1. Scroll down to find the section labeled 'Additional Inputs' below this page.
- 2. Find and click the check box that says 'User Consent [I agree to the terms and conditions. (can't make a button for it)]'.
- 3. After agreeing, click either the `🗑️ Clear` button, the `↩️ Undo` button, or the `🔄 Retry` button located above the message input area."""
+ yield consentPrompt
  return

  messages = [];
  if fakeTool:
- messages.append({"role": "system", "content": """[System: You have ability to generate images, via tools provided to you by system.
- To call a tool you need to write a json in a empty line; like writing it at the end of message.
- To generate a image; you need to follow this example JSON:
- {"tool": "imagine", "isCall": true, "prompt": "golden retriever sitting comfortably on a luxurious, modern couch. The retriever should look relaxed and content, with fluffy fur and a friendly expression. The couch should be stylish, possibly with elegant details like cushions and a soft texture that complements the dog's golden coat"}
- > 'tool' variable is used to define which tool you are calling
- > 'isCall' used to confirm that you are calling that function and not showing it for example
- > 'prompt' the image prompt that will be given to image generation model.
-
- Here's few more example so you can under stand better
- To show as an example>
- {"tool": "imagine", "isCall": false, "prompt": "futuristic robot playing chess against a human, with the robot confidently strategizing its next move while the human looks thoughtful and slightly perplexed"}
- {"tool": "imagine", "isCall": false, "prompt": "colorful parrot perched on a wooden fence, pecking at a vibrant tropical fruit. The parrot's feathers should be bright and varied, with greens, blues, and reds. The background should feature a lush, green jungle with scattered rays of sunlight"}
- {"tool": "imagine", "isCall": false, "prompt": "fluffy white cat lounging on a sunlit windowsill, with a gentle breeze blowing through the curtains"}
- To actually use the tool>
- {"tool": "imagine", "isCall": true, "prompt": "golden retriever puppy happily playing with a red ball in a sunny park. The park should have green grass, a few trees in the background, and a clear blue sky"}
- {"tool": "imagine", "isCall": true, "prompt": "red panda balancing on a tightrope, with a city skyline in the background"}
- {"tool": "imagine", "isCall": true, "prompt": "corgi puppy wearing sunglasses and a red bandana, sitting on a beach chair under a colorful beach umbrella, with a surfboard leaning against the chair and the ocean waves in the background"}
- In chat use examples:
- 1. ```
- Alright, here's an image of an hedgehog riding a skateboard:
- {"tool": "imagine", "isCall": true, "prompt": "A hedgehog riding a skateboard in a suburban park"}
- ```
- 2. ```
- Okay, here's the image you requested:
- {"tool": "imagine", "isCall": true, "prompt": "Persian cat lounging on a plush velvet sofa in a cozy, sunlit living room. The cat is elegantly poised, with a calm and regal demeanor, its fur meticulously groomed and slightly fluffed up as it rests comfortably"}
- ```]
- 3. ```
- This is how i generate images:
- {"tool": "imagine", "isCall": false, "prompt": "image prompt"}
- ```
- 4. (Do not do this, this would block the user from seeing the image.) ```
- Alright! Here's an image of a whimsical scene featuring a cat wearing a wizard hat, casting a spell with sparkling magic in a mystical forest.] \`\`\`
- {"tool": "imagine", "isCall": true, "prompt": "A playful cat wearing a colorful wizard hat, surrounded by magical sparkles and glowing orbs in a mystical forest. The cat looks curious and mischievous, with its tail swishing as it focuses on casting a spell. The forest is lush and enchanting, with vibrant flowers and soft, dappled sunlight filtering through the trees."}
- \`\`\`
- ```
- 5. (if in any case the user asks for the prompt)```
- Sure here's the prompt i wrote to generate the image below: `A colorful bird soaring through a bustling city skyline. The bird should have vibrant feathers, contrasting against the modern buildings and blue sky. Below, the city is alive with activity, featuring tall skyscrapers, busy streets, and small parks, creating a dynamic urban scene.`
- ```]"""})
+ messages.append({"role": "system", "content": fakeToolPrompt});
+ if calcBeta:
+ messages.append({"role": "system", "content": calcPrompt});
  if betterSystemPrompt:
  messages.append({"role": "system", "content": f"You are a helpful assistant. You are an OpenAI GPT model named {model_name}. The current time is {time.strftime('%Y-%m-%d %H:%M:%S')}. Please adhere to OpenAI's usage policies and guidelines. Ensure your responses are accurate, respectful, and within the scope of OpenAI's rules."});
  else:
@@ -403,9 +444,10 @@ Sure here's the prompt i wrote to generate the image below: `A colorful bird soa
  if assistant_message:
  messages.append(assistant_message)

- user_message = handleMultimodalData(model_name, "user", message)
- if user_message:
- messages.append(user_message)
+ if message:
+ user_message = handleMultimodalData(model_name, "user", message)
+ if user_message:
+ messages.append(user_message)

  mode = moderate(messages)
  if mode:
@@ -420,9 +462,57 @@ Sure here's the prompt i wrote to generate the image below: `A colorful bird soa
  yield "[MODERATION] I'm sorry, but I can't assist with that."
  return

- response = ""
-
- completion = streamChat({
+ image_count = 0
+ async def handleResponse(completion, prefix=""):
+ response = ""
+ didCalculationHappen = False
+ async for token in completion:
+ response += token['choices'][0]['delta'].get("content", "")
+ yield f"{prefix}{response}"
+ for line in response.split('\n'):
+ try:
+ data = json.loads(line)
+ if data.get("tool") == "imagine" and data.get("isCall") and "prompt" in data:
+ if image_count < 4:
+ image_count += 1
+ def fetch_image_url(prompt, line):
+ image_url = imagine(prompt)
+ return line, f'<img src="{image_url}" alt="{prompt}" width="512"/>'
+
+ def replace_line_in_response(line, replacement):
+ nonlocal response
+ response = response.replace(line, replacement)
+
+ thread = threading.Thread(target=lambda: replace_line_in_response(*fetch_image_url(data["prompt"], line)))
+ thread.start()
+ thread.join()
+ else:
+ response = response.replace(line, f'[System: 4 images per message limit; prompt asked: `{data["prompt"]}`]')
+ yield f"{prefix}{response}"
+ elif data.get("tool") == "calc" and data.get("isCall") and "prompt" in data:
+ didCalculationHappen = True
+ try:
+ result = safe_eval(data["prompt"])
+ response = response.replace(line, f'[System: `{data["prompt"]}` === `{result}`]')
+ except Exception as e:
+ response = response.replace(line, f'[System: Error in calculation; `{e}`]')
+ yield f"{prefix}{response}"
+ except json.JSONDecodeError:
+ continue
+ if didCalculationHappen:
+ messages.append({"role": "assistant", "content": response})
+ async for res in handleResponse(streamChat({
+ "model": model_name,
+ "messages": messages,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "seed": (random.randint(0, 2**32) if random_seed else seed),
+ "user": rnd(),
+ "stream": True
+ }), f"{response}\n\n"):
+ yield res
+ async for res in handleResponse(streamChat({
  "model": model_name,
  "messages": messages,
  "max_tokens": max_tokens,
@@ -431,24 +521,12 @@ Sure here's the prompt i wrote to generate the image below: `A colorful bird soa
  "seed": (random.randint(0, 2**32) if random_seed else seed),
  "user": rnd(),
  "stream": True
- })
-
- async for token in completion:
- response += token['choices'][0]['delta'].get("content", "")
- yield response
-
- for line in response.split('\n'):
- try:
- data = json.loads(line)
- if "tool" in data and "isCall" in data and data["tool"] == "imagine" and data["isCall"] == True and "prompt" in data:
- image_url = imagine(data["prompt"])
- response = response.replace(line, f'<img src="{image_url}" alt="{data["prompt"]}" width="512"/>')
- yield response
- except json.JSONDecodeError:
- continue
+ })):
+ yield res
+

  handleApiKeys();loadModels();checkModels();loadENV();
- lastUpdateMessage = "Added image generation via DALL-E-3."
+ lastUpdateMessage = "Added math faketool."
  demo = gr.ChatInterface(
  respond,
  title="GPT-4O-mini",
@@ -463,8 +541,9 @@ demo = gr.ChatInterface(
  gr.Slider(minimum=0, maximum=2**32, value=0, step=1, label="Seed"),
  gr.Checkbox(label="Randomize Seed", value=True),
  gr.Checkbox(label="FakeTool [Image generation beta]", value=True),
+ gr.Checkbox(label="FakeTool [Calculator beta]", value=True),
  gr.Checkbox(label="Better system prompt (ignores the system prompt set by user.)", value=True),
- gr.Checkbox(label="User Consent [I agree to the terms and conditions. (can't make a button for it)]", value=False)
+ gr.Checkbox(label="User Consent [I agree to the terms and conditions. (can't make a button for it)]", value=("--dev" in sys.argv or "-d" in sys.argv))
  ],
  )

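To make the calc fakeTool flow easier to follow outside the streaming code above, here is a condensed, synchronous sketch of the same idea: scan a finished model reply line by line, detect a `{"tool": "calc", "isCall": true, ...}` call, evaluate it with the same kind of guarded `eval` used by `safe_eval`, and splice the result back into the text. The helper name `apply_calc_tool` is illustrative only; the real app does this incrementally inside `handleResponse` while tokens are still streaming.

```python
import json
import math

def safe_eval(expression):
    # Lightly trimmed copy of the guard in app.py: expose only the math module's public names.
    allowed_names = {name: obj for name, obj in math.__dict__.items() if not name.startswith("__")}
    allowed_names['math'] = math
    code = compile(expression, "<string>", "eval")
    for name in code.co_names:
        if name not in allowed_names:
            raise NameError(f"Use of {name} is not allowed")
    return eval(code, {"__builtins__": {}}, allowed_names)

def apply_calc_tool(reply: str) -> str:
    # Hypothetical helper: replace each calc tool-call line with its evaluated result.
    for line in reply.split('\n'):
        try:
            data = json.loads(line)
        except json.JSONDecodeError:
            continue
        if data.get("tool") == "calc" and data.get("isCall") and "prompt" in data:
            try:
                result = safe_eval(data["prompt"])
                reply = reply.replace(line, f'[System: `{data["prompt"]}` === `{result}`]')
            except Exception as e:
                reply = reply.replace(line, f'[System: Error in calculation; `{e}`]')
    return reply

print(apply_calc_tool('Sure, one moment...\n{"tool": "calc", "isCall": true, "prompt": "math.pi * 5"}'))
# -> Sure, one moment...
#    [System: `math.pi * 5` === `15.707963267948966`]
```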
index.html CHANGED
@@ -37,6 +37,10 @@
  <hr/>
  <h2>Updates</h2>
  <div>
+ <div>
+ <strong> - 7. Update.</strong>
+ <p> * Added a second fakeTool: math. The model can now perform calculations.</p>
+ </div><hr/>
  <div>
  <strong> - 6. Update.</strong>
  <p> * Added image generation via DALL-E-3.</p>
requirements.txt CHANGED
@@ -2,4 +2,5 @@ uvicorn==0.27.1
  starlette==0.37.2
  sse-starlette==2.1.2
  requests==2.31.0
- aiohttp==3.8.5
+ aiohttp==3.8.5
+ asyncio
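The `aiohttp` pin together with the new `asyncio` entry backs the async streaming path in `app.py` (the `async for token in completion` loops above); note that `asyncio` has shipped with the standard library since Python 3.4, so the PyPI pin is likely redundant. As a rough, hypothetical illustration of that streaming pattern (not the Space's actual `streamChat` implementation), a client along these lines consumes OpenAI-style server-sent events with aiohttp:

```python
import json
import aiohttp

async def stream_chat(payload, api_key, base_url="https://api.openai.com/v1"):
    """Hypothetical sketch: yield streamed delta text from an OpenAI-style SSE endpoint."""
    headers = {"Authorization": f"Bearer {api_key}"}
    async with aiohttp.ClientSession() as session:
        async with session.post(f"{base_url}/chat/completions", json=payload, headers=headers) as resp:
            async for raw in resp.content:  # iterate the response body line by line
                line = raw.decode("utf-8").strip()
                if not line.startswith("data: ") or line == "data: [DONE]":
                    continue
                chunk = json.loads(line[len("data: "):])
                yield chunk["choices"][0]["delta"].get("content", "")

# Usage (inside an event loop):
#   async for piece in stream_chat({"model": "gpt-4o-mini", "messages": [...], "stream": True}, api_key):
#       print(piece, end="")
```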