KeyError: 0 when using Tab #8709

Closed
mohammednuruddin opened this issue Jul 6, 2024 · 3 comments
Labels
bug (Something isn't working) · needs repro (Awaiting full reproduction)

Comments

mohammednuruddin commented Jul 6, 2024

Describe the bug

I am building a chatbot with different tabs: one tab has a normal RAG chat and the other tab has a multimodal LLM.
When I run the multimodal LLM alone, without the ChatInterface for the RAG bot, I don't get the error; but when I add the ChatInterface for the RAG bot, I get an error when trying to use the multimodal LLM.

Have you searched existing issues? 🔎

  • I have searched and found no existing issues

Reproduction

#!/usr/bin/env python
# encoding: utf-8
# Imports reconstructed from usage below; the actual model, tokenizer,
# conversational_rag_chain, and session_id are defined elsewhere and omitted here.
import traceback

import gradio as gr
from PIL import Image
from langchain_core.messages import AIMessage, HumanMessage

device = "cuda"

ERROR_MSG = "Error, please retry"
model_name = 'MiniCPM-Llama3-V 2.5'


def predict_rag(message, history):
    history.append((message, ""))

    # Convert history to LangChain message format
    # (note: this list is built but never passed to the chain call below)
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        if ai:
            history_langchain_format.append(AIMessage(content=ai))

    response = conversational_rag_chain.stream(
        {"input": message},
        config={"configurable": {"session_id": session_id}},
    )

    partial_response = ""
    for chunk in response:
        if answer_chunk := chunk.get("answer"):
            partial_response += answer_chunk
            # say(answer_chunk)
            # print(f"{answer_chunk}", end="")
            yield partial_response

    # Update the last entry with the AI's full response
    # (use partial_response; `response` is the now-exhausted stream generator)
    history[-1] = (message, partial_response)

    return
    
def create_component(params, comp='Slider'):
    if comp == 'Slider':
        return gr.Slider(
            minimum=params['minimum'],
            maximum=params['maximum'],
            value=params['value'],
            step=params['step'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Radio':
        return gr.Radio(
            choices=params['choices'],
            value=params['value'],
            interactive=params['interactive'],
            label=params['label']
        )
    elif comp == 'Button':
        return gr.Button(
            value=params['value'],
            interactive=True
        )

# @spaces.GPU(duration=120)
def chat(img, msgs, ctx, params=None, vision_hidden_states=None):
    default_params = {"stream": False, "sampling": False, "num_beams":3, "repetition_penalty": 1.2, "max_new_tokens": 1024}
    if params is None:
        params = default_params
    if img is None:
        yield "Error, invalid image, please upload a new image"
    else:
        try:
            image = img.convert('RGB')
            answer = model.chat(
                image=image,
                msgs=msgs,
                tokenizer=tokenizer,
                **params
            )
            for char in answer:
                yield char
        except Exception as err:
            print(err)
            traceback.print_exc()
            yield ERROR_MSG


def upload_img(image, _chatbot, _app_session):
    image = Image.fromarray(image)

    _app_session['sts']=None
    _app_session['ctx']=[]
    _app_session['img']=image 
    _chatbot.append(('', 'Image uploaded successfully, you can talk to me now'))
    return _chatbot, _app_session


def respond(_chat_bot, _app_cfg):
    _question = _chat_bot[-1][0]
    print(':', _question)
    if _app_cfg.get('ctx', None) is None:
        _chat_bot[-1][1] = 'Please upload an image to start'
        yield (_chat_bot, _app_cfg)
    else:
        _context = _app_cfg['ctx'].copy()
        if _context:
            _context.append({"role": "user", "content": _question})
        else:
            _context = [{"role": "user", "content": _question}]
        params = {
                'sampling': True,
                'stream': True,
                'top_p': 0.8,
                'top_k': 100,
                'temperature': 0.7,
                'repetition_penalty': 1.05,
                "max_new_tokens": 896 
            }
    
        gen = chat(_app_cfg['img'], _context, None, params)
        _chat_bot[-1][1] = ""
        for _char in gen:
            _chat_bot[-1][1] += _char
            _context[-1]["content"] += _char
            yield (_chat_bot, _app_cfg)


def request(_question, _chat_bot, _app_cfg):
    _chat_bot.append((_question, None))
    return '', _chat_bot, _app_cfg


def regenerate_button_clicked(_question, _chat_bot, _app_cfg):
    if len(_chat_bot) <= 1:
        _chat_bot.append(('Regenerate', 'No question for regeneration.'))
        return '', _chat_bot, _app_cfg
    elif _chat_bot[-1][0] == 'Regenerate':
        return '', _chat_bot, _app_cfg
    else:
        _question = _chat_bot[-1][0]
        _chat_bot = _chat_bot[:-1]
        _app_cfg['ctx'] = _app_cfg['ctx'][:-2]
    return request(_question, _chat_bot, _app_cfg)


def clear_button_clicked(_question, _chat_bot, _app_cfg, _bt_pic):
    _chat_bot.clear()
    _app_cfg['sts'] = None
    _app_cfg['ctx'] = None
    _app_cfg['img'] = None
    _bt_pic = None
    return '', _chat_bot, _app_cfg, _bt_pic
    
css = """
#chatbot {
    flex-grow: 2 !important;  /* Changed size by increasing flex-grow */
    overflow: auto !important;
}
#col { height: calc(90vh - 2px - 16px) !important; }
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("## Your NSMQ Assistant")
    # with gr.Tab("Prep", ):
    #     with gr.Row():
    #         with gr.Column(scale=1,elem_id="col"):
    #             chat = gr.ChatInterface(
    #                 fn=rag,
    #                 chatbot=gr.Chatbot(elem_id="chatbot",
    #                                 render=False),

                    
    #             )
           
    with gr.Tab("Fast QA"):
        # pass
        with gr.Column(elem_id="col"):
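            # NOTE: this assignment rebinds the name `chat`, which until here
            # referred to the chat() generator function defined above; respond()
            # will therefore call the ChatInterface object instead of the function.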
            chat = gr.ChatInterface(predict_rag, fill_height=True)
            # chat = gr.chat_bot()
        # with gr.Column(elem_id='col'):
        #     image_input = gr.Image()
        #     image_output = gr.Image()
        # image_button = gr.Button("Flip")
            
    with gr.Tab("Chat with images"):
        with gr.Row():
            with gr.Column(scale=1, min_width=300):
                bt_pic = gr.Image(label="Upload an image to start")
                regenerate = create_component({'value': 'Regenerate'}, comp='Button')
                clear = create_component({'value': 'Clear'}, comp='Button')
            with gr.Column(scale=3, min_width=500):
                app_session = gr.State({'sts':None,'ctx':None,'img':None})
                chat_bot = gr.Chatbot(label=f"Chat with {model_name}")
                txt_message = gr.Textbox(label="Input text")
                
                clear.click(
                    clear_button_clicked,
                    [txt_message, chat_bot, app_session, bt_pic],
                    [txt_message, chat_bot, app_session, bt_pic],
                    queue=False
                )
                txt_message.submit(
                    request, 
                    [txt_message, chat_bot, app_session],
                    [txt_message, chat_bot, app_session],
                    queue=False
                ).then(
                    respond,
                    [chat_bot, app_session],
                    [chat_bot, app_session]
                )
                regenerate.click(
                    regenerate_button_clicked,
                    [txt_message, chat_bot, app_session],
                    [txt_message, chat_bot, app_session],
                    queue=False
                ).then(
                    respond,
                    [chat_bot, app_session],
                    [chat_bot, app_session]
                )
                bt_pic.upload(lambda: None, None, chat_bot, queue=False).then(upload_img, inputs=[bt_pic,chat_bot,app_session], outputs=[chat_bot,app_session])

# launch
# demo.launch(share=True, debug=True, show_api=False, server_port=8080)
# demo.queue()
demo.launch(share=True, debug=True)

Screenshot

[screenshot omitted]

Logs

Traceback (most recent call last):
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/queueing.py", line 541, in process_events
    response = await route_utils.call_process_api(
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/route_utils.py", line 276, in call_process_api
    output = await app.get_blocks().process_api(
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/blocks.py", line 1928, in process_api
    result = await self.call_function(
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/blocks.py", line 1526, in call_function
    prediction = await utils.async_iteration(iterator)
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/utils.py", line 657, in async_iteration
    return await iterator.__anext__()
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/utils.py", line 650, in __anext__
    return await anyio.to_thread.run_sync(
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/anyio/to_thread.py", line 56, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 2177, in run_sync_in_worker_thread
    return await future
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 859, in run
    result = context.run(func, *args)
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/utils.py", line 633, in run_sync_iterator_async
    return next(iterator)
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/utils.py", line 816, in gen_wrapper
    response = next(iterator)
  File "/tmp/ipykernel_9635/3196440666.py", line 116, in respond
    gen = chat(_app_cfg['img'], _context, None, params)
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/blocks.py", line 1429, in __call__
    if not (self.is_callable(fn_index)):
  File "/home/zeus/miniconda3/envs/cloudspace/lib/python3.10/site-packages/gradio/blocks.py", line 1394, in is_callable
    block_fn = self.fns[fn_index]
KeyError: 0
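
The last two frames suggest the likely cause: inside respond(), the name chat no longer refers to the chat() function but to the gr.ChatInterface assigned in the "Fast QA" tab, so Python invokes Blocks.__call__, which looks up self.fns[0] and raises KeyError: 0. A minimal sketch of that shadowing pattern (names illustrative; the exact exception may vary across gradio versions):

import gradio as gr

def chat(message):
    # the helper the app intends to call
    return message.upper()

with gr.Blocks() as demo:
    # rebinding `chat` shadows the function above
    chat = gr.ChatInterface(lambda m, h: m)

# this now resolves to Blocks.__call__ on the ChatInterface,
# which fails (KeyError: 0 on gradio 4.36.x per the logs above)
chat("hello")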

System Info

Gradio Environment Information:
------------------------------
Operating System: Windows
gradio version: 4.36.1
gradio_client version: 1.0.1

------------------------------------------------
gradio dependencies in your environment:

aiofiles: 23.2.1
altair: 5.2.0
fastapi: 0.110.0
ffmpy: 0.3.2
gradio-client==1.0.1 is not installed.
httpx: 0.27.0
huggingface-hub: 0.23.3
importlib-resources: 6.3.0
jinja2: 3.1.3
markupsafe: 2.1.5
matplotlib: 3.8.3
numpy: 1.26.4
orjson: 3.9.15
packaging: 23.2
pandas: 2.0.3
pillow: 10.2.0
pydantic: 2.6.3
pydub: 0.25.1
python-multipart: 0.0.9
pyyaml: 6.0.1
ruff: 0.3.2
semantic-version: 2.10.0
tomlkit==0.12.0 is not installed.
typer: 0.12.3
typing-extensions: 4.9.0
urllib3: 2.0.7
uvicorn: 0.28.0
authlib; extra == 'oauth' is not installed.
itsdangerous; extra == 'oauth' is not installed.


gradio_client dependencies in your environment:

fsspec: 2024.2.0
httpx: 0.27.0
huggingface-hub: 0.23.3
packaging: 23.2
typing-extensions: 4.9.0
websockets: 11.0.3

Severity

Blocking usage of gradio

mohammednuruddin added the bug label on Jul 6, 2024
abidlabs added the needs repro label on Jul 7, 2024
abidlabs (Member) commented Jul 7, 2024

Hi @mohammednuruddin, can you please provide a minimal code example that we can use to reproduce the issue above? The current code example has quite a lot going on and includes external dependencies. See: https://stackoverflow.com/help/minimal-reproducible-example

abidlabs (Member) commented Jul 8, 2024

Going to close this for now, but happy to reopen once we have a simpler, standalone repro!

abidlabs closed this as not planned on Jul 8, 2024
mohammednuruddin (Author) commented
I solved it. Thanks.
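
(A fix consistent with the code above, though not confirmed by the author: give the ChatInterface its own name so it no longer shadows the chat() function, e.g.

    with gr.Tab("Fast QA"):
        with gr.Column(elem_id="col"):
            rag_chat = gr.ChatInterface(predict_rag, fill_height=True)

so that respond() still calls the original chat() function.)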
