From 82c0987956c619a5055939b57fb31f27b8bb605a Mon Sep 17 00:00:00 2001
From: Nils Reimers
Date: Fri, 19 Mar 2021 22:40:25 +0100
Subject: [PATCH] update readme and version number

---
 README.md                         | 7 ++++---
 sentence_transformers/__init__.py | 2 +-
 setup.py                          | 4 ++--
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 3f809d5cd..5489aaa88 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # Sentence Transformers: Multilingual Sentence Embeddings using BERT / RoBERTa / XLM-RoBERTa & Co. with PyTorch
-This framework provides an easy method to compute dense vector representations for sentences and paragraphs (also known as sentence embeddings). The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and are tuned specificially meaningul sentence embeddings such that sentences with similar meanings are close in vector space.
+This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in vector space such that similar text is close and can efficiently be found using cosine similarity.
 
 We provide an increasing number of **[state-of-the-art pretrained models](https://www.sbert.net/docs/pretrained_models.html)** for more than 100 languages, fine-tuned for various use-cases.
 
@@ -11,7 +11,7 @@ Further, this framework allows an easy **[fine-tuning of custom embeddings mode
 For the **full documentation**, see [www.SBERT.net](https://www.sbert.net), as well as our publications:
 - [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084) (EMNLP 2019)
 - [Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation](https://arxiv.org/abs/2004.09813) (EMNLP 2020)
-- [Augmented SBERT: Data Augmentation Method for Improving Bi-Encoders for Pairwise Sentence Scoring Tasks](https://arxiv.org/abs/2010.08240) (arXiv 2020)
+- [Augmented SBERT: Data Augmentation Method for Improving Bi-Encoders for Pairwise Sentence Scoring Tasks](https://arxiv.org/abs/2010.08240) (NAACL 2021)
 - [The Curse of Dense Low-Dimensional Information Retrieval for Large Index Sizes](https://arxiv.org/abs/2012.14210) (arXiv 2020)
 
@@ -125,7 +125,8 @@ You can use this framework for:
 - [Semantic Search](https://www.sbert.net/examples/applications/semantic-search/README.html)
 - [Retrieve & Re-Rank](https://www.sbert.net/examples/applications/retrieve_rerank/README.html)
 - [Text Summarization](https://www.sbert.net/examples/applications/text-summarization/README.html)
-
+- [Image Search, Clustering & Duplicate Detection](https://www.sbert.net/examples/applications/image-search/README.html)
+
 and many more use-cases.
diff --git a/sentence_transformers/__init__.py b/sentence_transformers/__init__.py
index 64191fdae..83ec3b23f 100644
--- a/sentence_transformers/__init__.py
+++ b/sentence_transformers/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "1.0.1"
+__version__ = "1.0.2"
 __DOWNLOAD_SERVER__ = 'http://sbert.net/models/'
 from .datasets import SentencesDataset, ParallelSentencesDataset
 from .LoggingHandler import LoggingHandler
diff --git a/setup.py b/setup.py
index 401a43eab..c14c15086 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
 setup(
     name="sentence-transformers",
-    version="1.0.1",
+    version="1.0.2",
     author="Nils Reimers",
     author_email="info@nils-reimers.de",
     description="Sentence Embeddings using BERT / RoBERTa / XLM-R",
@@ -15,7 +15,7 @@
     long_description_content_type="text/markdown",
     license="Apache License 2.0",
     url="https://github.com/UKPLab/sentence-transformers",
-    download_url="https://github.com/UKPLab/sentence-transformers/archive/v0.4.2.zip",
+    download_url="https://github.com/UKPLab/sentence-transformers/archive/v1.0.2.zip",
     packages=find_packages(),
     install_requires=[
         'transformers>=3.1.0,<5.0.0',
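
For context, a minimal sketch (not part of this patch) of the workflow the updated README describes, run against the 1.0.2 release: embed two sentences and compare them with cosine similarity. The model name and example sentences are illustrative assumptions; `util.pytorch_cos_sim` is the cosine-similarity helper available in the 1.0.x API.

```python
# Minimal sketch of the usage described in the updated README (illustrative, not from the patch).
import sentence_transformers
from sentence_transformers import SentenceTransformer, util

print(sentence_transformers.__version__)  # "1.0.2" after installing this release

# The model name is an illustrative choice of pretrained model, not mandated by this patch.
model = SentenceTransformer('paraphrase-distilroberta-base-v1')

sentences = ["A man is eating food.", "A man is eating a piece of bread."]
embeddings = model.encode(sentences, convert_to_tensor=True)

# Similar sentences land close in vector space, so their cosine similarity is high.
score = util.pytorch_cos_sim(embeddings[0], embeddings[1])
print(score)
```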