awais-nayyar committed on
Commit
2e6f77d
1 Parent(s): 8bca4a9

initial changes

Browse files
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Makefile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
install:
	pip install --upgrade pip &&\
		pip install -r requirements.txt

test:
	python -m pytest -vvv --cov=hello --cov=greeting \
		--cov=smath --cov=web tests
	python -m pytest --nbval notebook.ipynb #tests our jupyter notebook
	#python -m pytest -v tests/test_web.py #if you just want to test web

debug:
	python -m pytest -vv --pdb #Debugger is invoked

one-test:
	python -m pytest -vv tests/test_greeting.py::test_my_name4

debugthree:
	# FIX: comment promised "first three failures" but the flag was --maxfail=4
	# (pytest stops after N failures, so three failures needs --maxfail=3).
	python -m pytest -vv --pdb --maxfail=3 # drop to PDB for first three failures

format:
	black *.py

lint:
	pylint --disable=R,C *.py

all: install lint test format
+
README.md CHANGED
@@ -1 +1,15 @@
1
- # HF_imageTotext_deployment
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
+ title: Image To Text Awais Nayyar
2
+ tags:
3
+ - image to text
4
+ - language models
5
+ - LLMs
6
+ emoji: 📷
7
+ colorFrom: gray
8
+ colorTo: blue
9
+ sdk: gradio
10
+ sdk_version: 3.14.0
11
+ app_file: app.py
12
+ pinned: true
13
+ license: mit
+ ---
14
+
15
+ # HF_imageTotext_deployment
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio image-captioning Space: ViT encoder + GPT-2 decoder captions an image."""
import torch
import re
import gradio as gr
from PIL import Image

from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
import os

# BUG FIX: this environment variable is read by TensorFlow at import time, so it
# must be set BEFORE `import tensorflow`; the original set it after the import,
# which has no effect on the already-initialized runtime.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
import tensorflow as tf

# Inference is CPU-only (no CUDA assumed on the Space hardware).
device = 'cpu'

# Pretrained image-captioning checkpoint (ViT vision encoder + GPT-2 decoder).
model_id = "nttdataspain/vit-gpt2-stablediffusion2-lora"
model = VisionEncoderDecoderModel.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
feature_extractor = ViTFeatureExtractor.from_pretrained(model_id)
def predict(image):
    """Return a generated text caption for *image* (a PIL image).

    Uses the module-level `model`, `tokenizer` and `feature_extractor`.
    """
    rgb = image.convert('RGB')
    model.eval()  # make sure dropout/batch-norm are in inference mode
    pixel_values = feature_extractor(images=[rgb], return_tensors="pt").pixel_values
    with torch.no_grad():
        generated = model.generate(
            pixel_values,
            max_length=16,
            num_beams=4,
            return_dict_in_generate=True,
        )
    captions = tokenizer.batch_decode(generated.sequences, skip_special_tokens=True)
    # Single input image -> single caption; strip padding whitespace.
    return captions[0].strip()
# ---------------------------------------------------------------------------
# Gradio UI
# FIX: the original also built unused `input`/`output` components through the
# legacy `gr.inputs.Image` / `gr.outputs.Textbox` API (removed in modern
# Gradio), shadowing the `input` builtin; that dead code is dropped. The
# removed `optional=` and `Label(type=...)` kwargs are dropped as well.
# ---------------------------------------------------------------------------
examples_folder = os.path.join(os.path.dirname(__file__), "examples")
examples = [os.path.join(examples_folder, file) for file in os.listdir(examples_folder)]

with gr.Blocks() as demo:

    gr.HTML(
        """
        <div style="text-align: center; max-width: 1200px; margin: 20px auto;">
          <h2 style="font-weight: 900; font-size: 3rem; margin: 0rem">
            📸 Image-to-Text with Awais Nayyar 📝
          </h2>
          <br>
        </div>
        """)

    with gr.Row():
        with gr.Column(scale=1):
            # `type='pil'` hands predict() a PIL.Image instance.
            img = gr.Image(label="Upload any Image", type='pil')
            button = gr.Button(value="Convert")
        with gr.Column(scale=1):
            out = gr.Label(label="Captions")

    button.click(predict, inputs=[img], outputs=[out])

    # Pre-cached example images (see gradio_cached_examples/ in this commit).
    gr.Examples(
        examples=examples,
        inputs=img,
        outputs=out,
        fn=predict,
        cache_examples=True,
    )
demo.launch(debug=True)
examples/example1.jpg ADDED
examples/example2.jpg ADDED
examples/example3.jpg ADDED
gradio ADDED
File without changes
gradio_cached_examples/10/Captions/tmpprrqa72a.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"label": "a man in a suit and tie standing in front of a fence"}
gradio_cached_examples/10/Captions/tmps19yvtzi.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"label": "a woman in a white dress holding a white cat"}
gradio_cached_examples/10/Captions/tmptw1httwo.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"label": "a man with a beard standing in front of a blue sky"}
gradio_cached_examples/10/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Captions,flag,username,timestamp
2
+ /workspaces/HF_imageTotext_deployment/gradio_cached_examples/10/Captions/tmptw1httwo.json,,,2023-10-13 17:11:27.247611
3
+ /workspaces/HF_imageTotext_deployment/gradio_cached_examples/10/Captions/tmpprrqa72a.json,,,2023-10-13 17:11:32.706829
4
+ /workspaces/HF_imageTotext_deployment/gradio_cached_examples/10/Captions/tmps19yvtzi.json,,,2023-10-13 17:11:37.773179
gradio_cached_examples/11/log.csv ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Captions,flag,username,timestamp
2
+ a man with a beard standing in front of a blue sky,,,2023-10-13 16:49:56.201797
3
+ a man in a suit and tie standing in front of a fence,,,2023-10-13 16:50:01.646168
4
+ a woman in a white dress holding a white cat,,,2023-10-13 16:50:06.803554
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ streamlit
2
+ transformers
3
+ gradio
4
+ pillow
5
+ requests
6
+ torch
7
+ tensorflow