from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv

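# Print debug information to stdout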
logging.basicConfig(format='%(asctime)s - %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO,
                    handlers=[LoggingHandler()])

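# Training parameters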
model_name = 'distilbert-base-uncased'
train_batch_size = 128
num_epochs = 1
max_seq_length = 32

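# Save path to store our model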
model_save_path = 'output/training_stsb_simcse-{}-{}-{}'.format(model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))

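# Check if the STSb dataset exists locally; if not, download it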
sts_dataset_path = 'data/stsbenchmark.tsv.gz'

if not os.path.exists(sts_dataset_path):
    util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)

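# Define the SentenceTransformer model: a transformer encoder whose token
# embeddings are mean-pooled into a fixed-size sentence embedding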
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])

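# Download the 1M Wikipedia sentences released with the SimCSE paper, if needed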
wikipedia_dataset_path = 'data/wiki1m_for_simcse.txt'
if not os.path.exists(wikipedia_dataset_path):
    util.http_get('https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt', wikipedia_dataset_path)

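# SimCSE training data: each sentence is paired with itself as its own positive.
# The two encoder passes apply different dropout masks, which provides the
# "augmentation" that the contrastive loss pulls together.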
train_samples = []
with open(wikipedia_dataset_path, 'r', encoding='utf8') as fIn:
    for line in fIn:
        line = line.strip()
        if len(line) >= 10:  # Skip very short lines
            train_samples.append(InputExample(texts=[line, line]))

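# Read the STSbenchmark dataset for evaluation (dev during training, test after)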
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
    reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        score = float(row['score']) / 5.0  # Normalize the gold score to range 0 ... 1

        if row['split'] == 'dev':
            dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
        elif row['split'] == 'test':
            test_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))

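# The evaluators report how well the cosine similarity between sentence
# embeddings correlates with the human-annotated STSb scores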
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, batch_size=train_batch_size, name='sts-dev')
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=train_batch_size, name='sts-test')

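# MultipleNegativesRankingLoss treats the other examples in a batch as negatives,
# so larger batches give a stronger training signal. drop_last=True avoids a
# small final batch with too few in-batch negatives.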
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)

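# Warm up the learning rate over 10% of the training steps and evaluate on the
# dev set ten times per epoch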
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)
evaluation_steps = int(len(train_dataloader) * 0.1)
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)

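# Train the model, evaluating on the STSb dev set every evaluation_steps batches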
model.fit(train_objectives=[(train_dataloader, train_loss)],
          evaluator=dev_evaluator,
          epochs=num_epochs,
          evaluation_steps=evaluation_steps,
          warmup_steps=warmup_steps,
          output_path=model_save_path,
          optimizer_params={'lr': 5e-5},
          use_amp=True  # Automatic mixed precision; set to False if the GPU lacks FP16 support
          )

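# Load the stored model and evaluate its performance on the STS benchmark test set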
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)

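# A minimal usage sketch (illustrative, not part of the original recipe): embed
# two sentences with the trained model and compare them with cosine similarity
# via sentence_transformers' util.cos_sim helper.
sentences = ['A man is playing a guitar.', 'Someone plays a guitar.']
embeddings = model.encode(sentences)
print(util.cos_sim(embeddings[0], embeddings[1]))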