FlagEmbedding

I ran into a problem when running the evaluation tasks. Is my usage wrong?

Open · iorisa opened this issue 1 year ago · 1 comment

import argparse
import os
import pickle
from pathlib import Path

import openai
import tiktoken
from mteb import MTEB

from C_MTEB.tasks import *
from flag_dres_model import FlagDRESModel

API_KEY = "********************"
BASE_URL = "*****************"

class OpenAIEmbedder:
    """
    Benchmark OpenAI's embeddings endpoint.
    """

    def __init__(self, engine, task_name=None, batch_size=32, save_emb=False, **kwargs):
        self.engine = engine
        self.max_token_len = 8191  # OpenAI embedding models accept at most 8191 input tokens
        self.batch_size = batch_size
        self.save_emb = save_emb  # Problematic as the filenames may end up being the same
        self.base_path = f"embeddings/{engine.split('/')[-1]}/"
        self.tokenizer = tiktoken.get_encoding('cl100k_base')
        self.task_name = task_name
        self.client = openai.OpenAI(api_key=API_KEY, base_url=BASE_URL)

        if save_emb:
            assert self.task_name is not None

        Path(self.base_path).mkdir(parents=True, exist_ok=True)

    def encode(self,
               sentences,
               decode=True,
               idx=None,
               **kwargs
               ):

        fin_embeddings = []

        # Cache file keyed by the task name plus a prefix/suffix of the batch;
        # this key is not collision-proof (see the sketch after the script).
        embedding_path = (
            f"{self.base_path}/{self.task_name}_"
            f"{sentences[0][:10]}_{sentences[-1][-10:]}.pickle"
            if sentences else None
        )
        if embedding_path and os.path.exists(embedding_path):
            with open(embedding_path, "rb") as f:
                fin_embeddings = pickle.load(f)["fin_embeddings"]
        else:
            for i in range(0, len(sentences), self.batch_size):
                batch = sentences[i: i + self.batch_size]

                # Truncate each sentence to the model's input-token limit.
                batch = [
                    self.tokenizer.decode(self.tokenizer.encode(sentence)[:self.max_token_len])
                    for sentence in batch
                ]

                # One embeddings request per batch; the API returns vectors in input order.
                out = [datum.embedding for datum in
                       self.client.embeddings.create(input=batch, model=self.engine).data]

                fin_embeddings.extend(out)
        # Persist embeddings so later runs can reuse them.
        if fin_embeddings and self.save_emb:
            with open(embedding_path, "wb") as f:
                pickle.dump({"fin_embeddings": fin_embeddings}, f)

        assert len(sentences) == len(fin_embeddings)
        return fin_embeddings

if __name__ == '__main__':
    # C-MTEB Chinese retrieval tasks to evaluate.
    task_names = ['T2Retrieval', 'MMarcoRetrieval', 'DuRetrieval',
                  'CovidRetrieval', 'CmedqaRetrieval',
                  'EcomRetrieval', 'MedicalRetrieval', 'VideoRetrieval']
    model_name = "text-embedding-3-small"
    batch_size = 64
    for task in task_names:
        model = OpenAIEmbedder(engine=model_name, task_name=task, batch_size=batch_size, save_emb=True)
        evaluation = MTEB(tasks=[task], task_langs=['zh', 'zh-CN'])
        evaluation.run(model, output_folder=f"zh_results/{model_name}", batch_size=batch_size, corpus_chunk_size=10000)
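
As the inline comment in __init__ notes, the cache filenames can collide: two different sentence lists that share the same first and last characters map to the same pickle. A minimal sketch of a collision-resistant key, hashing the whole batch (hashlib here is an illustration, not something the script above uses):

import hashlib

def cache_key(task_name, sentences):
    # Hash every sentence so two different batches never share a cache file.
    digest = hashlib.sha256("\n".join(sentences).encode("utf-8")).hexdigest()[:16]
    return f"{task_name}_{digest}.pickle"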

The exception occurs in C_MTEB.tasks.Classification:TNews(hf_subsets=['zh', 'zh-CN', 'zh-zh'], **{}), whose metadata_dict is never initialized. (screenshot: 截屏2024-08-23 18 09 05)
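
A quick way to check whether this is a library-version mismatch rather than a usage error is to print the installed mteb version and compare it against the release C_MTEB was written for (the expected pin would be in C_MTEB's requirements; this diagnostic is a suggestion, not from the thread):

from importlib.metadata import version

# C_MTEB's task classes target an older mteb API; on a newer mteb,
# fields such as metadata_dict may never be populated.
print(version("mteb"))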

iorisa · Aug 23 '24 10:08

You could try downgrading the mteb library to an older version.

staoxiao · Aug 23 '24 11:08
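
In practice, the suggested downgrade means installing an older mteb release before re-running the script. The thread does not name a known-good version, so the pin below is a placeholder to adjust:

pip install "mteb==1.1.1"  # placeholder version, not confirmed by the thread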