
[BUG] stream:true Error : ValueError: Unsupported vector type <class 'NoneType'>

Open kurdo2000 opened this issue 10 months ago • 2 comments

Pre-check

  • [x] I have searched the existing issues and none cover this bug.

Description

I'm trying to stream the answer from the local model to a simple web page.

When I use the following, it works, but only without stream: true:

async function sendMessage(){
  const userText = chatInput.value.trim();
  if(!userText) return;
  chatInput.value = '';
  chatLog.push({role:'user', content:userText});
  renderChat();

const body = {
  messages: chatLog,
  use_context: true,
  include_sources: true
};
try {
  const resp = await fetch(`${API_URL}/v1/chat/completions`, {
    method:'POST', headers:{'Content-Type':'application/json'},
    body: JSON.stringify(body)
  });
  const data = await resp.json();
  if(data?.choices?.length){
    let ans = data.choices[0].message.content || "(Keine Antwort)";
    ans = ans.replace(/\(Quellen:.+\)$/,'').trim();
    chatLog.push({role:'assistant', content:ans});
  } else {
    chatLog.push({role:'assistant', content:'(Fehler/keine Antwort)'});
  }
} catch(e){
  chatLog.push({role:'assistant', content:'(Fehler: '+e+')'});
}
renderChat();

}

I get the correct answers from my PDF files.

But when I use stream: true, I get the following error:

03:57:59.483 [ERROR ] uvicorn.error - Exception in ASGI application
Traceback (most recent call last):
  File "/home/x/venv/lib/python3.11/site-packages/uvicorn/protocols/http/httptools_impl.py", line 401, in run_asgi
    result = await app(  # type: ignore[func-returns-value]
  File "/home/x/venv/lib/python3.11/site-packages/uvicorn/middleware/proxy_headers.py", line 70, in __call__
    return await self.app(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/fastapi/applications.py", line 1054, in __call__
    await super().__call__(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/applications.py", line 113, in __call__
    await self.middleware_stack(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/middleware/errors.py", line 187, in __call__
    raise exc
  File "/home/x/venv/lib/python3.11/site-packages/starlette/middleware/errors.py", line 165, in __call__
    await self.app(scope, receive, _send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/middleware/cors.py", line 93, in __call__
    await self.simple_response(scope, receive, send, request_headers=headers)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/middleware/cors.py", line 144, in simple_response
    await self.app(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/middleware/exceptions.py", line 62, in __call__
    await wrap_app_handling_exceptions(self.app, conn)(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/_exception_handler.py", line 62, in wrapped_app
    raise exc
  File "/home/x/venv/lib/python3.11/site-packages/starlette/_exception_handler.py", line 51, in wrapped_app
    await app(scope, receive, sender)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/routing.py", line 715, in __call__
    await self.middleware_stack(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/routing.py", line 735, in app
    await route.handle(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/routing.py", line 288, in handle
    await self.app(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/routing.py", line 76, in app
    await wrap_app_handling_exceptions(app, request)(scope, receive, send)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/_exception_handler.py", line 62, in wrapped_app
    raise exc
  File "/home/x/venv/lib/python3.11/site-packages/starlette/_exception_handler.py", line 51, in wrapped_app
    await app(scope, receive, sender)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/routing.py", line 73, in app
    response = await f(request)
  File "/home/x/venv/lib/python3.11/site-packages/fastapi/routing.py", line 301, in app
    raw_response = await run_endpoint_function(
  File "/home/x/venv/lib/python3.11/site-packages/fastapi/routing.py", line 214, in run_endpoint_function
    return await run_in_threadpool(dependant.call, **values)
  File "/home/x/venv/lib/python3.11/site-packages/starlette/concurrency.py", line 39, in run_in_threadpool
    return await anyio.to_thread.run_sync(func, *args)
  File "/home/x/venv/lib/python3.11/site-packages/anyio/to_thread.py", line 56, in run_sync
    return await get_async_backend().run_sync_in_worker_thread(
  File "/home/x/venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 2405, in run_sync_in_worker_thread
    return await future
  File "/home/x/venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py", line 914, in run
    result = context.run(func, *args)
  File "/home/x/private-gpt/private_gpt/server/chat/chat_router.py", line 95, in chat_completion
    completion_gen = service.stream_chat(
  File "/home/x/private-gpt/private_gpt/server/chat/chat_service.py", line 175, in stream_chat
    streaming_response = chat_engine.stream_chat(
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/instrumentation/dispatcher.py", line 265, in wrapper
    result = func(*args, **kwargs)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/callbacks/utils.py", line 41, in wrapper
    return func(self, *args, **kwargs)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/chat_engine/context.py", line 237, in stream_chat
    nodes = self._get_nodes(message)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/chat_engine/context.py", line 133, in _get_nodes
    nodes = self._retriever.retrieve(message)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/instrumentation/dispatcher.py", line 265, in wrapper
    result = func(*args, **kwargs)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/base/base_retriever.py", line 245, in retrieve
    nodes = self._retrieve(query_bundle)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/instrumentation/dispatcher.py", line 265, in wrapper
    result = func(*args, **kwargs)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/indices/vector_store/retrievers/retriever.py", line 103, in _retrieve
    return self._get_nodes_with_embeddings(query_bundle)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/core/indices/vector_store/retrievers/retriever.py", line 180, in _get_nodes_with_embeddings
    query_result = self._vector_store.query(query, **self._kwargs)
  File "/home/x/venv/lib/python3.11/site-packages/llama_index/vector_stores/qdrant/base.py", line 836, in query
    response = self._client.search(
  File "/home/x/venv/lib/python3.11/site-packages/qdrant_client/qdrant_client.py", line 387, in search
    return self._client.search(
  File "/home/x/venv/lib/python3.11/site-packages/qdrant_client/local/qdrant_local.py", line 204, in search
    return collection.search(
  File "/home/x/venv/lib/python3.11/site-packages/qdrant_client/local/local_collection.py", line 519, in search
    name, query_vector = self._resolve_query_vector_name(query_vector)
  File "/home/x/venv/lib/python3.11/site-packages/qdrant_client/local/local_collection.py", line 321, in _resolve_query_vector_name
    raise ValueError(f"Unsupported vector type {type(query_vector)}")
ValueError: Unsupported vector type <class 'NoneType'>

I think I'm doing something wrong, but what?

Please note: when I use use_context: false with stream: true, it works and streams the output perfectly.
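To make the failing combination explicit, the difference between the two request bodies is roughly the following (the variable names are only for illustration; same chatLog and endpoint as in the code above):

// Works: streaming without retrieval
const workingBody = {
  messages: chatLog,
  use_context: false,
  stream: true
};

// Fails with "Unsupported vector type <class 'NoneType'>": streaming plus retrieval
const failingBody = {
  messages: chatLog,
  use_context: true,
  include_sources: true,
  stream: true
};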

Steps to Reproduce

1. Call the function above with stream: true added to the request body.

Expected Behavior

Stream the output

Actual Behavior

The ValueError shown above is raised instead of a streamed response.

Environment

Linux, NVIDIA GTX 1660

Additional Information

No response

Version

Latest as of now

Setup Checklist

  • [x] Confirm that you have followed the installation instructions in the project’s documentation.
  • [x] Check that you are using the latest version of the project.
  • [x] Verify disk space availability for model storage and data processing.
  • [x] Ensure that you have the necessary permissions to run the project.

NVIDIA GPU Setup Checklist

  • [x] Check that all CUDA dependencies are installed and compatible with your GPU (refer to CUDA's documentation)
  • [x] Ensure an NVIDIA GPU is installed and recognized by the system (run nvidia-smi to verify).
  • [x] Ensure proper permissions are set for accessing GPU resources.
  • [x] Docker users - Verify that the NVIDIA Container Toolkit is configured correctly (e.g. run sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi)

kurdo2000 commented on Feb 04 '25 03:02

This is the function with stream: true. It works without use_context: true, but I need it to work with my context.

async function sendMessage(){
  const userText = chatInput.value.trim();
  if(!userText) return;
  chatInput.value = '';
  chatLog.push({role:'user', content: userText});
  renderChat();

// 1) Enable streaming
const body = {
  messages: chatLog,
  use_context: true,
  include_sources: true,
  stream: true // <--- added
};

// 2) Placeholder for the bot reply,
// so we can append text piece by piece while streaming
const botMsg = {role:'assistant', content: ''};
chatLog.push(botMsg);
renderChat();

try {
  const resp = await fetch(`${API_URL}/v1/chat/completions`, {
    method: 'POST', headers: {'Content-Type':'application/json'},
    body: JSON.stringify(body)
  });

// Check for an error status
if(!resp.ok){
  botMsg.content = '(Fehler: ' + resp.status + ' ' + resp.statusText + ')';
  renderChat();
  return;
}

// 3) SSE reader: read chunks and append them to the bot message
const reader = resp.body.getReader();
const decoder = new TextDecoder('utf-8');
let done = false;

while(!done){
  const { value, done: readerDone } = await reader.read();
  done = readerDone;
  if(value){
    const chunk = decoder.decode(value, {stream:true});
    // a chunk may contain several "data: ..." lines
    const lines = chunk.split(/\r?\n/).filter(Boolean);

    for(const line of lines){
      if(line.startsWith("data: ")){
        const jsonPart = line.substring(6).trim();
        if(jsonPart === "[DONE]"){
          done = true;
          break;
        }
        try {
          const obj = JSON.parse(jsonPart);
          if(obj?.choices?.length){
            const delta = obj.choices[0].delta?.content;
            if(delta){
              // append the text piece by piece to the bot reply
              botMsg.content += delta;
              renderChat();
            }
          }
        } catch(e){
          console.log("JSON parse error:", e);
        }
      }
    }
  }
}

} catch(err){
  botMsg.content = '(Fehler: ' + err + ')';
  renderChat();
}

}
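One side note on the reader loop above: decoder chunks are not guaranteed to end on a line boundary, so a single "data:" line can be split across two reads and then fail to parse. A buffered variant (just a sketch, not tested against the PrivateGPT stream) could look like this:

// Sketch: keep the trailing partial line in a buffer until the next chunk arrives
let buffer = '';
while(!done){
  const { value, done: readerDone } = await reader.read();
  done = readerDone;
  if(!value) continue;
  buffer += decoder.decode(value, {stream:true});
  const lines = buffer.split(/\r?\n/);
  buffer = lines.pop(); // possibly incomplete last line, kept for the next iteration
  for(const line of lines){
    if(!line.startsWith('data: ')) continue;
    const jsonPart = line.slice(6).trim();
    if(jsonPart === '[DONE]'){ done = true; break; }
    try {
      const obj = JSON.parse(jsonPart);
      const delta = obj?.choices?.[0]?.delta?.content;
      if(delta){ botMsg.content += delta; renderChat(); }
    } catch(e){ console.log('JSON parse error:', e); }
  }
}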

kurdo2000 commented on Feb 04 '25 03:02

Hey @kurdo2000, have you tried to upload a document manually and create the collection before chatting? It seems the qdrant collection has not been created.
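For reference, a minimal sketch of such a manual upload from the browser (the /v1/ingest/file endpoint path and the fileInput element are assumptions here; check the PrivateGPT API docs for your version):

// Sketch: ingest a document first so the Qdrant collection gets created
// (endpoint path assumed; fileInput is a hypothetical <input type="file"> element)
const form = new FormData();
form.append('file', fileInput.files[0]);
const resp = await fetch(`${API_URL}/v1/ingest/file`, { method: 'POST', body: form });
console.log(await resp.json()); // should return the ingested document metadata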

jaluma commented on Feb 05 '25 08:02