import os
import io
from PIL import Image
import base64
from transformers import pipeline
import gradio as gr

# The API key is only needed for hosted/remote inference (see the sketch
# below); the local pipeline used by this app does not require it, so use
# .get() to avoid a KeyError when the variable is unset.
hf_api_key = os.environ.get('HF_API_KEY')
# Load the image-to-text pipeline with the BLIP model
get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
def image_to_base64_str(pil_image):
    # Encode a PIL image as a base64 string, as expected by JSON-based
    # inference endpoints. Not used by the local pipeline path below.
    byte_arr = io.BytesIO()
    pil_image.save(byte_arr, format='PNG')
    byte_arr = byte_arr.getvalue()
    return base64.b64encode(byte_arr).decode('utf-8')
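
# --- Optional: remote inference sketch (not used by the app below) ---
# A minimal sketch of how hf_api_key could be used to caption images via the
# Hugging Face serverless Inference API instead of the local pipeline. The
# function name is hypothetical and not part of the original app. This posts
# raw PNG bytes, which the serverless API accepts for image tasks; an endpoint
# that expects a JSON payload would take image_to_base64_str(pil_image) instead.
def captioner_via_api(pil_image):
    import requests  # only needed for this remote path
    api_url = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"
    headers = {"Authorization": f"Bearer {hf_api_key}"}
    buf = io.BytesIO()
    pil_image.save(buf, format="PNG")
    response = requests.post(api_url, headers=headers, data=buf.getvalue())
    response.raise_for_status()
    return response.json()[0]["generated_text"]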
def captioner(image):
    # The BLIP pipeline accepts a PIL image directly
    result = get_completion(image)
    return result[0]['generated_text']
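# For reference: the image-to-text pipeline returns a list of dicts, e.g.
# [{'generated_text': 'a dog wearing a santa hat'}] (caption text illustrative),
# which is why captioner indexes result[0]['generated_text'].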
demo = gr.Interface(fn=captioner,
                    inputs=[gr.Image(label="Upload image", type="pil")],
                    outputs=[gr.Textbox(label="Caption")],
                    title="Image Captioning with BLIP",
                    description="Caption any image using the BLIP model",
                    flagging_mode="never",  # updated from the deprecated allow_flagging
                    examples=["images/christmas_dog.jpg", "images/bird_flight.jpg", "images/cow.jpg"])
demo.launch(
    share=True,
    # server_port=int(os.environ.get('PORT3', 7860))  # uncomment to pin a port
)