diff --git a/pyproject.toml b/pyproject.toml
index 9297b1150..a2477a630 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,12 +1,15 @@
 [project]
 name = "sentence-transformers"
-version = "3.1.0"
-description = "Multilingual text embeddings"
-license = { file = "LICENSE" }
+version = "3.1.1"
+description = "State-of-the-Art Text Embeddings"
+license = { text = "Apache 2.0" }
 readme = "README.md"
 authors = [
     { name = "Nils Reimers", email = "info@nils-reimers.de" },
-    { name = "Tom Aarsen" },
+    { name = "Tom Aarsen", email = "tom.aarsen@huggingface.co" },
+]
+maintainers = [
+    { name = "Tom Aarsen", email = "tom.aarsen@huggingface.co" }
 ]
 requires-python = ">=3.8"
 keywords = [
@@ -33,7 +36,6 @@ dependencies = [
     "transformers>=4.38.0,<5.0.0",
     "tqdm",
     "torch>=1.11.0",
-    "numpy<2.0.0",
     "scikit-learn",
     "scipy",
     "huggingface-hub>=0.19.3",
diff --git a/sentence_transformers/__init__.py b/sentence_transformers/__init__.py
index 14f3ec7c4..a97f44e78 100644
--- a/sentence_transformers/__init__.py
+++ b/sentence_transformers/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-__version__ = "3.1.0"
+__version__ = "3.1.1"
 __MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
 
 import importlib
diff --git a/sentence_transformers/util.py b/sentence_transformers/util.py
index 59e7bf0c4..5b8baa5f7 100644
--- a/sentence_transformers/util.py
+++ b/sentence_transformers/util.py
@@ -714,8 +714,12 @@ def mine_hard_negatives(
         except Exception:
             pass
 
-        corpus_embeddings = model.encode(corpus, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
-        query_embeddings = model.encode(queries, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
+        corpus_embeddings = model.encode(
+            corpus, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
+        query_embeddings = model.encode(
+            queries, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
         index.add(corpus_embeddings)
 
         scores_list = []
@@ -731,8 +735,12 @@ def mine_hard_negatives(
     else:
         # Embed the corpus and the queries
-        corpus_embeddings = model.encode(corpus, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
-        query_embeddings = model.encode(queries, batch_size=batch_size, convert_to_numpy=True, show_progress_bar=True)
+        corpus_embeddings = model.encode(
+            corpus, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
+        query_embeddings = model.encode(
+            queries, batch_size=batch_size, normalize_embeddings=True, convert_to_numpy=True, show_progress_bar=True
+        )
 
         scores = model.similarity(query_embeddings, corpus_embeddings).to(device)
 
         # Keep only the range_max + max_positives highest scores. We offset by 1 to potentially include the positive pair