# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "torch",
#     "torchvision",
#     "transformers",
#     "accelerate",
#     "peft",
#     "sentence-transformers",  # needed for the SentenceTransformer API used below
# ]
# ///

try:
    from sentence_transformers import SentenceTransformer

    # Load the embedding model. The model id is an assumption inferred from the
    # output filename (google_embeddinggemma-300m_2.txt); adjust if needed.
    model = SentenceTransformer("google/embeddinggemma-300m")

    # The sentence pairs to encode, grouped by expected similarity.
    sentence_high = [
        "The chef prepared a delicious meal for the guests.",
        "A tasty dinner was cooked by the chef for the visitors."
    ]
    sentence_medium = [
        "She is an expert in machine learning.",
        "He has a deep interest in artificial intelligence."
    ]
    sentence_low = [
        "The weather in Tokyo is sunny today.",
        "I need to buy groceries for the week."
    ]

    for sentence in [sentence_high, sentence_medium, sentence_low]:
        print("🙋‍♂️")
        print(sentence)
        # Encode both sentences and compute their similarity score.
        embeddings = model.encode(sentence)
        similarities = model.similarity(embeddings[0], embeddings[1])
        print("`-> 🤖 score: ", similarities.numpy()[0][0])

    with open('google_embeddinggemma-300m_2.txt', 'w', encoding='utf-8') as f:
        f.write('Everything was good in google_embeddinggemma-300m_2.txt')
except Exception as e:
    # On failure, write the full traceback to the output file instead.
    with open('google_embeddinggemma-300m_2.txt', 'w', encoding='utf-8') as f:
        import traceback
        traceback.print_exc(file=f)
finally:
    # Upload the result file (success message or traceback) to the Hub.
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='google_embeddinggemma-300m_2.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='google_embeddinggemma-300m_2.txt',
        repo_type='dataset',
    )