LLaVA icon indicating copy to clipboard operation
LLaVA copied to clipboard

How to make an image request to test_message.py

Open tensorboy opened this issue 1 year ago • 1 comments

I'm trying to send an image to test_message.py.

Here is the code I've modified:

import argparse
import json

import requests
import hashlib
import base64
from llava.conversation import default_conversation
from io import BytesIO
from PIL import Image

'''
python -m llava.serve.test_message --model-name LLaVA-7B-v0 --controller http://localhost:10000
'''
def main():
    """Send one prompt plus one base64-encoded image to a LLaVA worker and
    stream the generated reply to stdout.

    Reads configuration from the module-level ``args`` namespace that the
    ``__main__`` guard populates via argparse.
    """
    if args.worker_address:
        worker_addr = args.worker_address
    else:
        # Discover a worker via the controller: refresh its registry, list
        # the available models, then ask which address serves ours.
        controller_addr = args.controller_address
        requests.post(controller_addr + "/refresh_all_workers")
        ret = requests.post(controller_addr + "/list_models")
        models = ret.json()["models"]
        models.sort()
        print(f"Models: {models}")

        ret = requests.post(controller_addr + "/get_worker_address",
            json={"model": args.model_name})
        worker_addr = ret.json()["address"]
        print(f"worker_addr: {worker_addr}")

    if worker_addr == "":
        return

    conv = default_conversation.copy()
    # BUG FIX: the worker pairs each entry of pload['images'] with an
    # "<image>" placeholder token in the prompt; sending an image with no
    # token in the prompt makes the request stall (the reported symptom).
    conv.append_message(conv.roles[0], "<image>\n" + args.message)
    # BUG FIX: append an empty assistant turn so get_prompt() ends with the
    # assistant-role separator and the model knows to start generating.
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    headers = {"User-Agent": "LLaVA Client"}
    pload = {
        "model": args.model_name,
        "prompt": prompt,
        "max_new_tokens": args.max_new_tokens,
        "temperature": 0.7,
        "stop": conv.sep,
    }

    # Load, shrink, JPEG-encode, then base64-encode the image for transport.
    image_path = '/home/tensorboy/LLaVA/images/llava_logo.png'
    with Image.open(image_path) as image:
        # BUG FIX: PNGs are frequently RGBA or palette mode, which Pillow
        # refuses to write as JPEG ("cannot write mode RGBA as JPEG") —
        # force RGB before resizing and saving.
        image = image.convert("RGB").resize((224, 224))
        buffered = BytesIO()
        image.save(buffered, format="JPEG")
    img_b64_str = base64.b64encode(buffered.getvalue()).decode()
    pload['images'] = [img_b64_str]

    response = requests.post(worker_addr + "/worker_generate_stream", headers=headers,
            json=pload, stream=True)

    print(prompt.replace(conv.sep, "\n"), end="")
    # The worker streams NUL-delimited JSON chunks; each chunk carries the
    # full generated text so far, so print only the part after the last
    # separator (carriage return overwrites the previous partial line).
    for chunk in response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode("utf-8"))
            output = data["text"].split(conv.sep)[-1]
            print(output, end="\r")
    print("")


if __name__ == "__main__":
    # Command-line entry point: parse options into the module-global `args`
    # namespace, then run the request.
    parser = argparse.ArgumentParser()
    # Controller that tracks registered workers; only consulted when
    # --worker-address is not supplied.
    parser.add_argument("--controller-address", type=str, default="http://localhost:21001")
    # Direct worker URL; when given, controller discovery is skipped.
    parser.add_argument("--worker-address", type=str)
    # Model name used both for worker lookup and in the generate payload.
    parser.add_argument("--model-name", type=str, default="facebook/opt-350m")
    # Generation length cap forwarded to the worker.
    parser.add_argument("--max-new-tokens", type=int, default=32)
    # User message appended as the first conversation turn.
    parser.add_argument("--message", type=str, default=
        "Describe the image in detail.")
    # NOTE: `args` is intentionally module-global — main() reads it directly.
    args = parser.parse_args()

    main()

and it just gets stuck here:

python -m llava.serve.test_message --model-name LLaVA-7B-v0 --controller http://localhost:10000
Models: ['LLaVA-7B-v0']
worker_addr: http://localhost:40000

Any suggestions on how to fix this?

tensorboy avatar May 02 '23 05:05 tensorboy