flair
flair copied to clipboard
Adding a transformer-based encoder-decoder model
This PR aims to address the feature I proposed earlier. Please let me know if you have any suggestions for this model. Thank you!
Example code showing how to use the model:
from flair.datasets import OpusParallelCorpus
from flair.embeddings import TransformerEmbeddings
from flair.models import CausalLanguageModelDecoder, EncoderDecoderLanguageModel
from flair.trainers import ModelTrainer
# 1) Create a corpus for the Tatoeba dataset.
# We will learn a model which can translate German to English.
# Each datapoint is a sentence pair: `first` holds the l1 (German) side,
# `second` holds the l2 (English) side — see the accessor functions below.
corpus = OpusParallelCorpus(
dataset="tatoeba", # "tatoeba" or "subtitles" are supported in this example
l1="de",  # source language code
l2="en",  # target language code
max_tokens_per_doc=512,  # cap document length; presumably longer docs are truncated or dropped — confirm
)
# 2) Create an encoder and a decoder
# NOTE(review): the encoder is a Qwen checkpoint and the decoder is GPT-2;
# presumably any Hugging Face causal LM id works for the decoder — confirm.
encoder_embedding = TransformerEmbeddings("Qwen/Qwen2.5-0.5B-Instruct")
decoder = CausalLanguageModelDecoder("gpt2")
# 3) Define input text and output text for the encoder-decoder model
def input_text_fn1(datapoint):
    """Return the source (German) side of a parallel datapoint as encoder input."""
    source_side = datapoint.first
    return source_side.text
def output_text_fn1(datapoint):
    """Return the target (English) side of a parallel datapoint as decoder output."""
    target_side = datapoint.second
    return target_side.text
# 4) Instantiate models
edm1 = EncoderDecoderLanguageModel(
encoder_embeddings=encoder_embedding,
decoder=decoder,
label_type="translate_de_to_en",  # name under which this task's labels/predictions are stored
generate_input_text_fn=input_text_fn1,  # maps a datapoint to the encoder input text
generate_output_text_fn=output_text_fn1,  # maps a datapoint to the target text to generate
)
trainer = ModelTrainer(edm1, corpus)
# Fine-tune for one epoch; "local" is the output folder for checkpoints/logs.
trainer.fine_tune("local", max_epochs=1, mini_batch_size=8, eval_batch_size=8, save_model_each_k_epochs=1)
The two functions, generate_input_text_fn and generate_output_text_fn, provide the flexibility to create different tasks without duplicating the underlying dataset. Example code demonstrating this flexibility:
from flair.datasets import OpusParallelCorpus
from flair.embeddings import TransformerEmbeddings
from flair.models import CausalLanguageModelDecoder, EncoderDecoderLanguageModel
from flair.nn.multitask import make_multitask_model_and_corpus
from flair.trainers import ModelTrainer
# 1) Create a corpus for the Tatoeba dataset.
# We will learn a model which can translate English to German and translate German to English
# One corpus serves both directions: `first` is the l1 (German) side and
# `second` is the l2 (English) side of each sentence pair.
corpus = OpusParallelCorpus(
dataset="tatoeba", # "tatoeba" or "subtitles" are supported in this example
l1="de",  # German side of the pair
l2="en",  # English side of the pair
max_tokens_per_doc=512,  # cap document length
)
# 2) Create an encoder and a decoder
# The single encoder/decoder pair is shared by both translation directions below.
encoder_embedding = TransformerEmbeddings("Qwen/Qwen2.5-0.5B-Instruct")
decoder = CausalLanguageModelDecoder("gpt2")
# 3) Define input text and output text for the encoder-decoder model
def input_text_fn1(datapoint):
    """Build the de->en encoder input: task prompt followed by the German text."""
    return "[TASK] translate German to English: [TASK] " + datapoint.first.text
def output_text_fn1(datapoint):
    """Target for the de->en task: the English side of the pair."""
    english_side = datapoint.second
    return english_side.text
def input_text_fn2(datapoint):
    """Build the en->de encoder input: task prompt followed by the English text."""
    return "[TASK] translate English to German: [TASK] " + datapoint.second.text
def output_text_fn2(datapoint):
    """Target for the en->de task: the German side of the pair."""
    german_side = datapoint.first
    return german_side.text
# 4) Instantiate models
# Two task heads share the same encoder, decoder, and corpus; only the
# input/output text functions (and label_type) differ per direction.
edm1 = EncoderDecoderLanguageModel(
encoder_embeddings=encoder_embedding,
decoder=decoder,
label_type="translate_de_to_en",  # German -> English direction
generate_input_text_fn=input_text_fn1,
generate_output_text_fn=output_text_fn1,
)
edm2 = EncoderDecoderLanguageModel(
encoder_embeddings=encoder_embedding,
decoder=decoder,
label_type="translate_en_to_de",  # English -> German direction
generate_input_text_fn=input_text_fn2,
generate_output_text_fn=output_text_fn2,
)
# Wrap both models (each paired with the same corpus) into a single
# multitask model/corpus so one trainer run optimizes both directions.
multitask_model, multicorpus = make_multitask_model_and_corpus([(edm1, corpus), (edm2, corpus)])
trainer = ModelTrainer(multitask_model, multicorpus)
# Fine-tune for one epoch; "local" is the output folder for checkpoints/logs.
trainer.fine_tune("local", max_epochs=1, mini_batch_size=8, eval_batch_size=8, save_model_each_k_epochs=1)