TiberiuCristianLeon committed on
Commit 4605ea3 · verified · 1 Parent(s): 4225b15

Delete app.py

Files changed (1)
  1. app.py +0 -627
app.py DELETED
@@ -1,627 +0,0 @@
import gradio as gr
import spaces
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoModel, pipeline
from transformers import logging as hflogging
import languagecodes
import httpx, os
import polars as pl

hflogging.set_verbosity_error()
favourite_langs = {"German": "de", "Romanian": "ro", "English": "en", "-----": "-----"}
df = pl.read_parquet("isolanguages.parquet")
non_empty_isos = df.slice(1).filter(pl.col("ISO639-1") != "").rows()
all_langs = {iso[0]: (iso[1], iso[2], iso[3]) for iso in non_empty_isos}  # {'Romanian': ('ro', 'rum', 'ron')}
iso1_to_name = {iso[1]: iso[0] for iso in non_empty_isos}  # {'ro': 'Romanian', 'de': 'German'}
langs = list(favourite_langs.keys())
langs.extend(list(all_langs.keys()))  # Language options as list, favourite languages first

models = ["Helsinki-NLP", "QUICKMT", "Argos", "Google", "HPLT", "HPLT-OPUS",
          "Helsinki-NLP/opus-mt-tc-bible-big-mul-mul", "Helsinki-NLP/opus-mt-tc-bible-big-mul-deu_eng_nld",
          "Helsinki-NLP/opus-mt-tc-bible-big-mul-deu_eng_fra_por_spa", "Helsinki-NLP/opus-mt-tc-bible-big-deu_eng_fra_por_spa-mul",
          "Helsinki-NLP/opus-mt-tc-bible-big-roa-deu_eng_fra_por_spa", "Helsinki-NLP/opus-mt-tc-bible-big-deu_eng_fra_por_spa-roa", "Helsinki-NLP/opus-mt-tc-bible-big-roa-en",
          "facebook/nllb-200-distilled-600M", "facebook/nllb-200-distilled-1.3B", "facebook/nllb-200-1.3B", "facebook/nllb-200-3.3B",
          "facebook/mbart-large-50-many-to-many-mmt", "facebook/mbart-large-50-one-to-many-mmt", "facebook/mbart-large-50-many-to-one-mmt",
          "facebook/m2m100_418M", "facebook/m2m100_1.2B", "Lego-MT/Lego-MT",
          "bigscience/mt0-small", "bigscience/mt0-base", "bigscience/mt0-large", "bigscience/mt0-xl",
          "bigscience/bloomz-560m", "bigscience/bloomz-1b1", "bigscience/bloomz-1b7", "bigscience/bloomz-3b",
          "google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large",
          "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl",
          "google/madlad400-3b-mt", "jbochi/madlad400-3b-mt",
          "HuggingFaceTB/SmolLM3-3B", "winninghealth/WiNGPT-Babel-2",
          "utter-project/EuroLLM-1.7B", "utter-project/EuroLLM-1.7B-Instruct",
          "Unbabel/Tower-Plus-2B", "Unbabel/TowerInstruct-7B-v0.2", "Unbabel/TowerInstruct-Mistral-7B-v0.2"
          ]
DEFAULTS = [langs[0], langs[1], models[0]]  # Default source language, target language and model

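# The isolanguages.parquet rows are assumed to be (Name, ISO639-1, ISO639-2B, ISO639-2T)
# tuples, e.g. ('Romanian', 'ro', 'rum', 'ron'); the lookups above rely on that column order.
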
def model_to_cuda(model):
    # Move the model to GPU if available
    if torch.cuda.is_available():
        model = model.to('cuda')
        print("CUDA is available! Using GPU.")
    else:
        print("CUDA not available! Using CPU.")
    return model

def HelsinkiNLPAutoTokenizer(sl, tl, input_text):  # deprecated
    model_name = f"Helsinki-NLP/opus-mt-{sl}-{tl}"
    message_text = f'Translated from {sl} to {tl} with {model_name}.'
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = model_to_cuda(AutoModelForSeq2SeqLM.from_pretrained(model_name))
    except EnvironmentError:
        try:
            model_name = f"Helsinki-NLP/opus-tatoeba-{sl}-{tl}"
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = model_to_cuda(AutoModelForSeq2SeqLM.from_pretrained(model_name))
            message_text = f'Translated from {sl} to {tl} with {model_name}.'
        except EnvironmentError as error:
            return f"Error finding model: {model_name}! Try another available language combination.", error
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    output_ids = model.generate(input_ids, max_length=512)
    translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return translated_text, message_text

class Translators:
    def __init__(self, model_name: str, sl: str, tl: str, input_text: str):
        self.model_name = model_name
        self.sl, self.tl = sl, tl
        self.input_text = input_text
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def google(self):
        # The base URL of the translation endpoint is read from the GCLIENT environment variable
        url = os.environ['GCLIENT'] + f'sl={self.sl}&tl={self.tl}&q={self.input_text}'
        response = httpx.get(url)
        return response.json()[0][0][0]

    def simplepipe(self):
        try:
            pipe = pipeline("translation", model=self.model_name, device=self.device)
            translation = pipe(self.input_text)
            message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {self.model_name}.'
            return translation[0]['translation_text'], message
        except Exception as error:
            return f"Error translating with model: {self.model_name}! Try another available language combination or model.", error

    def hplt(self, opus=False):
        hplt_models = ['ar-en', 'bs-en', 'ca-en', 'en-ar', 'en-bs', 'en-ca', 'en-et', 'en-eu', 'en-fi',
                       'en-ga', 'en-gl', 'en-hi', 'en-hr', 'en-is', 'en-mt', 'en-nn', 'en-sq', 'en-sw',
                       'en-zh_hant', 'et-en', 'eu-en', 'fi-en', 'ga-en', 'gl-en', 'hi-en', 'hr-en',
                       'is-en', 'mt-en', 'nn-en', 'sq-en', 'sw-en', 'zh_hant-en']
        if opus:
            hplt_model = f'HPLT/translate-{self.sl}-{self.tl}-v1.0-hplt_opus'  # e.g. HPLT/translate-en-hr-v1.0-hplt_opus
        else:
            hplt_model = f'HPLT/translate-{self.sl}-{self.tl}-v1.0-hplt'  # e.g. HPLT/translate-en-hr-v1.0-hplt
        if f'{self.sl}-{self.tl}' in hplt_models:
            pipe = pipeline("translation", model=hplt_model, device=self.device)
            translation = pipe(self.input_text)
            translated_text = translation[0]['translation_text']
            message_text = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {hplt_model}.'
        else:
            translated_text = f'HPLT model from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} not available!'
            message_text = f"Available models: {', '.join(hplt_models)}"
        return translated_text, message_text

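    # Example (hypothetical): Translators("HPLT", "en", "hr", "Hello").hplt() resolves to the
    # checkpoint HPLT/translate-en-hr-v1.0-hplt, while hplt(opus=True) picks the -hplt_opus variant.
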
    @staticmethod
    def download_argos_model(from_code, to_code):
        import argostranslate.package
        print('Downloading model', from_code, to_code)
        # Download and install the Argos Translate package for this language pair
        argostranslate.package.update_package_index()
        available_packages = argostranslate.package.get_available_packages()
        package_to_install = next(
            filter(lambda x: x.from_code == from_code and x.to_code == to_code, available_packages)
        )
        argostranslate.package.install_from_path(package_to_install.download())

    def argos(self):
        import argostranslate.translate, argostranslate.package
        try:
            Translators.download_argos_model(self.sl, self.tl)  # Download model
            translated_text = argostranslate.translate.translate(self.input_text, self.sl, self.tl)  # Translate
        except StopIteration:
            # next() found no package for this pair: list the available ones instead
            packages_info = ', '.join(f"{pkg.from_name} ({pkg.from_code}) -> {pkg.to_name} ({pkg.to_code})" for pkg in argostranslate.package.get_available_packages())
            translated_text = f"No Argos model for {self.sl} to {self.tl}. Try another model or language combination from the available Argos models: {packages_info}."
        except Exception as error:
            translated_text = str(error)
        return translated_text

    @staticmethod
    def quickmttranslate(model_path, input_text):
        from quickmt import Translator
        # ctranslate2 expects 'cuda' (not 'gpu') for GPU inference; 'cpu' forces CPU
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        translator = Translator(str(model_path), device=device)
        # Lower beam_size (e.g. 1) is faster but gives lower quality
        translation = translator(input_text, beam_size=5, max_input_length=512, max_decoding_length=512)
        return translation

    @staticmethod
    def quickmtdownload(model_name):
        from quickmt.hub import hf_download
        from pathlib import Path
        model_path = Path("/quickmt/models") / model_name
        if not model_path.exists():
            hf_download(
                model_name=f"quickmt/{model_name}",
                output_dir=Path("/quickmt/models") / model_name,
            )
        return model_path

    def quickmt(self):
        model_name = f"quickmt-{self.sl}-{self.tl}"
        quickmt_models = ['ar-en', 'bn-en', 'cs-en', 'da-en', 'de-en', 'el-en', 'en-ar', 'en-bn', 'en-cs', 'en-de', 'en-el', 'en-es',
                          'en-fa', 'en-fr', 'en-he', 'en-hi', 'en-hu', 'en-id', 'en-it', 'en-ja', 'en-ko', 'en-lv', 'en-pl', 'en-pt',
                          'en-ro', 'en-ru', 'en-th', 'en-tr', 'en-ur', 'en-vi', 'en-zh', 'es-en', 'fa-en', 'fr-en', 'he-en', 'hi-en',
                          'hu-en', 'id-en', 'it-en', 'ja-en', 'ko-en', 'lv-en', 'pl-en', 'pt-en', 'ro-en', 'ru-en', 'th-en', 'tr-en', 'ur-en', 'vi-en', 'zh-en']
        available_languages = ['ar', 'bn', 'cs', 'da', 'de', 'el', 'en', 'es', 'fa', 'fr', 'he', 'hi', 'hu',
                               'id', 'it', 'ja', 'ko', 'lv', 'pl', 'pt', 'ro', 'ru', 'th', 'tr', 'ur', 'vi', 'zh']
        # Direct translation model
        if f"{self.sl}-{self.tl}" in quickmt_models:
            model_path = Translators.quickmtdownload(model_name)
            translated_text = Translators.quickmttranslate(model_path, self.input_text)
            message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {model_name}.'
        # Otherwise pivot through English
        elif self.sl in available_languages and self.tl in available_languages:
            model_name = f"quickmt-{self.sl}-en"
            model_path = Translators.quickmtdownload(model_name)
            entranslation = Translators.quickmttranslate(model_path, self.input_text)
            model_name = f"quickmt-en-{self.tl}"
            model_path = Translators.quickmtdownload(model_name)
            translated_text = Translators.quickmttranslate(model_path, entranslation)
            message = f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with Quickmt using pivot language English.'
        else:
            translated_text = f'No Quickmt model available for translation from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]}!'
            message = f"Available models: {', '.join(quickmt_models)}"
        return translated_text, message

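    # Example (hypothetical): 'de-ro' is not in quickmt_models, but both languages are in
    # available_languages, so quickmt() pivots through English: quickmt-de-en produces an
    # intermediate English text that quickmt-en-ro then translates to Romanian.
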
    def HelsinkiNLP_mulroa(self):
        try:
            pipe = pipeline("translation", model=self.model_name, device=self.device)
            iso1to3 = {iso[1]: iso[3] for iso in non_empty_isos}  # {'ro': 'ron'}
            iso3tl = iso1to3.get(self.tl)  # 'deu', 'ron', 'eng', 'fra'
            translation = pipe(f'>>{iso3tl}<< {self.input_text}')
            return translation[0]['translation_text'], f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {self.model_name}.'
        except Exception as error:
            return f"Error translating with model: {self.model_name}! Try another available language combination.", error

    def HelsinkiNLP(self):
        try:  # Standard bilingual model
            model_name = f"Helsinki-NLP/opus-mt-{self.sl}-{self.tl}"
            pipe = pipeline("translation", model=model_name, device=self.device)
            translation = pipe(self.input_text)
            return translation[0]['translation_text'], f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {model_name}.'
        except EnvironmentError:
            try:  # Tatoeba models
                model_name = f"Helsinki-NLP/opus-tatoeba-{self.sl}-{self.tl}"
                pipe = pipeline("translation", model=model_name, device=self.device)
                translation = pipe(self.input_text)
                return translation[0]['translation_text'], f'Translated from {iso1_to_name[self.sl]} to {iso1_to_name[self.tl]} with {model_name}.'
            except EnvironmentError:
                self.model_name = "Helsinki-NLP/opus-mt-tc-bible-big-mul-mul"  # Last resort: multilingual-to-multilingual model
                return self.HelsinkiNLP_mulroa()
        except KeyError as error:
            return f"Error: Translation direction {self.sl} to {self.tl} is not supported by Helsinki Translation Models", error

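    # Example (hypothetical): for English to Romanian with a mul/roa checkpoint, the pipeline
    # input becomes '>>ron<< Hello world'; multilingual OPUS models read the target language
    # from this leading >>xxx<< token.
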
    def LegoMT(self):
        from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
        model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)  # "Lego-MT/Lego-MT"
        tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
        tokenizer.src_lang = self.sl
        encoded = tokenizer(self.input_text, return_tensors="pt")
        generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(self.tl))
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    def madlad(self):
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, device_map="auto")
        tokenizer = T5Tokenizer.from_pretrained(self.model_name)
        # MADLAD expects the target language tag <2xx> prepended to the source text
        text = f"<2{self.tl}> {self.input_text}"
        translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=self.sl, tgt_lang=self.tl)
        translated_text = translator(text, max_length=512)
        return translated_text[0]['translation_text']

    def smollm(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        model = AutoModelForCausalLM.from_pretrained(self.model_name)
        prompt = f"""Translate the following {self.sl} text to {self.tl}, generating only the translated text and maintaining the original meaning and tone:
{self.input_text}
Translation:"""
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(
            inputs.input_ids,
            max_length=len(inputs.input_ids[0]) + 150,
            temperature=0.3,
            do_sample=True
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print(response)
        return response.split("Translation:")[-1].strip()

    def flan(self):
        tokenizer = T5Tokenizer.from_pretrained(self.model_name, legacy=False)
        model = T5ForConditionalGeneration.from_pretrained(self.model_name)
        prompt = f"translate {self.sl} to {self.tl}: {self.input_text}"
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids
        outputs = model.generate(input_ids)
        return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

    def tfive(self):
        tokenizer = T5Tokenizer.from_pretrained(self.model_name)
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, device_map="auto")
        prompt = f"translate {self.sl} to {self.tl}: {self.input_text}"
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output_ids = model.generate(input_ids, max_length=512)
        translated_text = tokenizer.decode(output_ids[0], skip_special_tokens=True).strip()
        return translated_text

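    # Note: tfive() and flan() are called with full language names (see translate_text below),
    # so the T5-style prompt reads e.g. "translate English to German: Hello world".
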
    def mbart_many_to_many(self):
        from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
        model = MBartForConditionalGeneration.from_pretrained(self.model_name)
        tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name)
        # Translate source to target
        tokenizer.src_lang = languagecodes.mbart_large_languages[self.sl]
        encoded = tokenizer(self.input_text, return_tensors="pt")
        generated_tokens = model.generate(
            **encoded,
            forced_bos_token_id=tokenizer.lang_code_to_id[languagecodes.mbart_large_languages[self.tl]]
        )
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    def mbart_one_to_many(self):
        # Translate from English
        from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
        model = MBartForConditionalGeneration.from_pretrained(self.model_name)
        tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name, src_lang="en_XX")
        model_inputs = tokenizer(self.input_text, return_tensors="pt")
        langid = languagecodes.mbart_large_languages[self.tl]
        generated_tokens = model.generate(
            **model_inputs,
            forced_bos_token_id=tokenizer.lang_code_to_id[langid]
        )
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    def mbart_many_to_one(self):
        # Translate to English
        from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
        model = MBartForConditionalGeneration.from_pretrained(self.model_name)
        tokenizer = MBart50TokenizerFast.from_pretrained(self.model_name)
        tokenizer.src_lang = languagecodes.mbart_large_languages[self.sl]
        encoded = tokenizer(self.input_text, return_tensors="pt")
        generated_tokens = model.generate(**encoded)
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

    def mtom(self):
        from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
        model = M2M100ForConditionalGeneration.from_pretrained(self.model_name)
        tokenizer = M2M100Tokenizer.from_pretrained(self.model_name)
        tokenizer.src_lang = self.sl
        encoded = tokenizer(self.input_text, return_tensors="pt")
        generated_tokens = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id(self.tl))
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

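    # The mBART-50 methods expect locale-style codes such as 'en_XX' or 'ro_RO';
    # languagecodes.mbart_large_languages is assumed to map full language names to these codes.
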
    def bigscience(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        # Ensure the input ends with a full stop
        self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
        inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
        outputs = model.generate(inputs)
        translation = tokenizer.decode(outputs[0])
        translation = translation.replace('<pad> ', '').replace('</s>', '')
        return translation

    def bloomz(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        model = AutoModelForCausalLM.from_pretrained(self.model_name)
        # Ensure the input ends with a full stop
        self.input_text = self.input_text if self.input_text.endswith('.') else f'{self.input_text}.'
        inputs = tokenizer.encode(f"Translate to {self.tl}: {self.input_text}", return_tensors="pt")
        outputs = model.generate(inputs)
        translation = tokenizer.decode(outputs[0])
        translation = translation.replace('<pad> ', '').replace('</s>', '')
        translation = translation.split('Translation:')[-1].strip() if 'Translation:' in translation else translation.strip()
        return translation

    def nllb(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name, src_lang=self.sl)
        model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=self.sl, tgt_lang=self.tl)
        translated_text = translator(self.input_text, max_length=512)
        return translated_text[0]['translation_text']

    def wingpt(self):
        model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype="auto",
            device_map="auto"
        )
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        messages = [
            {"role": "system", "content": f"Translate this to {self.tl} language"},
            {"role": "user", "content": self.input_text}
        ]

        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=512,
            temperature=0.1
        )

        # Strip the prompt tokens, keep only the newly generated continuation
        generated_ids = [
            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
        ]
        output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        print(output)
        result = output.split('\n')[-1].strip() if '\n' in output else output.strip()
        return result

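    # nllb() receives FLORES-200 style codes (e.g. 'deu_Latn', 'ron_Latn'), resolved from
    # languagecodes.nllb_language_codes in translate_text, rather than ISO 639-1 codes.
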
    def eurollm(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        model = AutoModelForCausalLM.from_pretrained(self.model_name)
        # Completion-style prompt: "<source language>: <text> <target language>:"
        prompt = f"{self.sl}: {self.input_text} {self.tl}:"
        inputs = tokenizer(prompt, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=512)
        output = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print(output)
        result = output.rsplit(f'{self.tl}:')[-1].strip() if '\n' in output or f'{self.tl}:' in output else output.strip()
        return result

    def eurollm_instruct(self):
        tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        model = AutoModelForCausalLM.from_pretrained(self.model_name)
        text = f'<|im_start|>system\n<|im_end|>\n<|im_start|>user\nTranslate the following {self.sl} source text to {self.tl}:\n{self.sl}: {self.input_text} \n{self.tl}: <|im_end|>\n<|im_start|>assistant\n'
        inputs = tokenizer(text, return_tensors="pt")
        outputs = model.generate(**inputs, max_new_tokens=512)
        output = tokenizer.decode(outputs[0], skip_special_tokens=True)
        if f'{self.tl}:' in output:
            output = output.rsplit(f'{self.tl}:')[-1].strip().replace('assistant\n', '').strip()
        return output

    def unbabel(self):
        pipe = pipeline("text-generation", model=self.model_name, torch_dtype=torch.bfloat16, device_map="auto")
        messages = [{"role": "user",
                     "content": f"Translate the following text from {self.sl} into {self.tl}.\n{self.sl}: {self.input_text}.\n{self.tl}:"}]
        prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
        # Allow roughly 1.5x the input token count for the generated translation
        tokenized_input = pipe.tokenizer(self.input_text, return_tensors="pt")
        num_input_tokens = len(tokenized_input["input_ids"][0])
        max_new_tokens = round(num_input_tokens + 0.5 * num_input_tokens)
        outputs = pipe(prompt, max_new_tokens=max_new_tokens, do_sample=False)
        translated_text = outputs[0]["generated_text"]
        print(f"Input chars: {len(self.input_text)}", f"Input tokens: {num_input_tokens}", f"max_new_tokens: {max_new_tokens}",
              "Chars to tokens ratio:", round(len(self.input_text) / num_input_tokens, 2), f"Raw translation: {translated_text}")
        # Strip chat-template markers and boilerplate from the raw generation
        markers = ["<end_of_turn>", "<|im_end|>", "<|im_start|>assistant"]
        for marker in markers:
            if marker in translated_text:
                translated_text = translated_text.split(marker)[1].strip()
        translated_text = translated_text.replace('Answer:', '', 1).strip() if translated_text.startswith('Answer:') else translated_text
        translated_text = translated_text.split("Translated text:")[0].strip() if "Translated text:" in translated_text else translated_text
        # Keep only as many lines as the input had
        split_translated_text = translated_text.split('\n', translated_text.count('\n'))
        translated_text = '\n'.join(split_translated_text[:self.input_text.count('\n')+1])
        return translated_text

    def bergamot(self):
        # Local Bergamot models; assumes a ./{model_name}/bergamot.config.yml next to the app
        try:
            import bergamot
            config = bergamot.ServiceConfig(numWorkers=4)
            service = bergamot.Service(config)
            model = service.modelFromConfigPath(f"./{self.model_name}/bergamot.config.yml")
            options = bergamot.ResponseOptions(alignment=False, qualityScores=False, HTML=False)
            # The service expects a list of texts
            rawresponse = service.translate(model, bergamot.VectorString([self.input_text]), options)
            translated_text: str = next(iter(rawresponse)).target.text
            message_text = f"Translated from {self.sl} to {self.tl} with Bergamot {self.model_name}."
        except Exception as error:
            translated_text, message_text = f"Error translating with Bergamot {self.model_name}!", error
        return translated_text, message_text

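# Minimal direct-use sketch of the class (hypothetical values):
# text, message = Translators("Helsinki-NLP", "de", "en", "Hallo Welt").HelsinkiNLP()
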
@spaces.GPU
def translate_text(input_text: str, s_language: str, t_language: str, model_name: str) -> tuple[str, str]:
    """
    Translates the input text from the source language to the target language using the specified model.

    Parameters:
        input_text (str): The source text to be translated
        s_language (str): The source language of the input text
        t_language (str): The target language into which the input text is translated
        model_name (str): The selected translation model name

    Returns:
        tuple:
            translated_text (str): The input text translated into the selected target language
            message_text (str): A descriptive message summarizing the translation process. Example: "Translated from English to German with Helsinki-NLP."

    Example:
        >>> translate_text("Hello world", "English", "German", "Helsinki-NLP")
        ("Hallo Welt", "Translated from English to German with Helsinki-NLP.")
    """
    sl = all_langs[s_language][0]
    tl = all_langs[t_language][0]
    message_text = f'Translated from {s_language} to {t_language} with {model_name}'
    if not input_text or input_text.strip() == '':
        translated_text = 'No input text entered!'
        message_text = 'Please enter a text to translate!'
        return translated_text, message_text
    if sl == tl:
        translated_text = f'Source language {s_language} identical to target language {t_language}!'
        message_text = 'Please choose different source and target languages!'
        return translated_text, message_text
    translated_text = None
    try:
        # Dispatch to the matching backend; each branch passes either ISO codes (sl, tl)
        # or full language names (s_language, t_language), depending on what the model expects
        if "-mul" in model_name.lower() or "mul-" in model_name.lower() or "-roa" in model_name.lower():
            translated_text, message_text = Translators(model_name, sl, tl, input_text).HelsinkiNLP_mulroa()
        elif model_name == "Helsinki-NLP":
            translated_text, message_text = Translators(model_name, sl, tl, input_text).HelsinkiNLP()
        elif model_name == 'Argos':
            translated_text = Translators(model_name, sl, tl, input_text).argos()
        elif model_name == "QUICKMT":
            translated_text, message_text = Translators(model_name, sl, tl, input_text).quickmt()
        elif model_name == 'Google':
            translated_text = Translators(model_name, sl, tl, input_text).google()
        elif model_name == "Helsinki-NLP/opus-mt-tc-bible-big-roa-en":
            translated_text, message_text = Translators(model_name, sl, tl, input_text).simplepipe()
        elif "m2m" in model_name.lower():
            translated_text = Translators(model_name, sl, tl, input_text).mtom()
        elif "lego" in model_name.lower():
            translated_text = Translators(model_name, sl, tl, input_text).LegoMT()
        elif model_name.startswith('google-t5'):
            translated_text = Translators(model_name, s_language, t_language, input_text).tfive()
        elif 'flan' in model_name.lower():
            translated_text = Translators(model_name, s_language, t_language, input_text).flan()
        elif 'madlad' in model_name.lower():
            translated_text = Translators(model_name, sl, tl, input_text).madlad()
        elif 'mt0' in model_name.lower():
            translated_text = Translators(model_name, s_language, t_language, input_text).bigscience()
        elif 'bloomz' in model_name.lower():
            translated_text = Translators(model_name, s_language, t_language, input_text).bloomz()
        elif 'nllb' in model_name.lower():
            nnlbsl, nnlbtl = languagecodes.nllb_language_codes[s_language], languagecodes.nllb_language_codes[t_language]
            translated_text = Translators(model_name, nnlbsl, nnlbtl, input_text).nllb()
        elif model_name == "facebook/mbart-large-50-many-to-many-mmt":
            translated_text = Translators(model_name, s_language, t_language, input_text).mbart_many_to_many()
        elif model_name == "facebook/mbart-large-50-one-to-many-mmt":
            translated_text = Translators(model_name, s_language, t_language, input_text).mbart_one_to_many()
        elif model_name == "facebook/mbart-large-50-many-to-one-mmt":
            translated_text = Translators(model_name, s_language, t_language, input_text).mbart_many_to_one()
        elif model_name == "utter-project/EuroLLM-1.7B-Instruct":
            translated_text = Translators(model_name, s_language, t_language, input_text).eurollm_instruct()
        elif model_name == "utter-project/EuroLLM-1.7B":
            translated_text = Translators(model_name, s_language, t_language, input_text).eurollm()
        elif 'Unbabel' in model_name:
            translated_text = Translators(model_name, s_language, t_language, input_text).unbabel()
        elif model_name == "HuggingFaceTB/SmolLM3-3B":
            translated_text = Translators(model_name, s_language, t_language, input_text).smollm()
        elif model_name == "winninghealth/WiNGPT-Babel-2":
            translated_text = Translators(model_name, s_language, t_language, input_text).wingpt()
        elif "HPLT" in model_name:
            translated_text, message_text = Translators(model_name, sl, tl, input_text).hplt(opus=(model_name == "HPLT-OPUS"))
        elif model_name == "Bergamot":
            translated_text, message_text = Translators(model_name, s_language, t_language, input_text).bergamot()
    except Exception as error:
        translated_text = str(error)
    finally:
        print(input_text, translated_text, message_text)
    return translated_text, message_text

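# Hypothetical remote call over the Gradio API once the Space is running
# (space id and api_name are assumptions, not verified against the deployment):
# from gradio_client import Client
# client = Client("TiberiuCristianLeon/<space-id>")
# text, message = client.predict("Hello world", "English", "German", "Helsinki-NLP", api_name="/translate_text")
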
# Swap the source and target dropdown values
def swap_languages(src_lang, tgt_lang):
    return tgt_lang, src_lang

def get_info(model_name: str, sl: str = None, tl: str = None):
    # Fetch the model card (README.md) or a short description for the selected model
    helsinki = '### [Helsinki-NLP](https://huggingface.co/Helsinki-NLP "Helsinki-NLP")'
    if model_name == "Helsinki-NLP" and sl and tl:
        url = f'https://huggingface.co/{model_name}/opus-mt-{sl}-{tl}/raw/main/README.md'
        response = httpx.get(url).text
        if 'Repository not found' in response or 'Invalid username or password' in response:
            return helsinki
        return response
    elif model_name == "Argos":
        return httpx.get('https://huggingface.co/TiberiuCristianLeon/Argostranslate/raw/main/README.md').text
    elif "HPLT" in model_name:
        return """[HPLT Uni direction translation models](https://huggingface.co/collections/HPLT/hplt-12-uni-direction-translation-models)
        ['ar-en', 'bs-en', 'ca-en', 'en-ar', 'en-bs', 'en-ca', 'en-et', 'en-eu', 'en-fi',
        'en-ga', 'en-gl', 'en-hi', 'en-hr', 'en-is', 'en-mt', 'en-nn', 'en-sq', 'en-sw',
        'en-zh_hant', 'et-en', 'eu-en', 'fi-en', 'ga-en', 'gl-en', 'hi-en', 'hr-en',
        'is-en', 'mt-en', 'nn-en', 'sq-en', 'sw-en', 'zh_hant-en']"""
    elif "QUICKMT" in model_name:
        return """[QUICKMT](https://huggingface.co/quickmt)
        ['ar-en', 'bn-en', 'cs-en', 'da-en', 'de-en', 'el-en', 'en-ar', 'en-bn', 'en-cs', 'en-de', 'en-el', 'en-es',
        'en-fa', 'en-fr', 'en-he', 'en-hi', 'en-hu', 'en-id', 'en-it', 'en-ja', 'en-ko', 'en-lv', 'en-pl', 'en-pt',
        'en-ro', 'en-ru', 'en-th', 'en-tr', 'en-ur', 'en-vi', 'en-zh', 'es-en', 'fa-en', 'fr-en', 'he-en', 'hi-en',
        'hu-en', 'id-en', 'it-en', 'ja-en', 'ko-en', 'lv-en', 'pl-en', 'pt-en', 'ro-en', 'ru-en', 'th-en', 'tr-en', 'ur-en', 'vi-en', 'zh-en']"""
    elif model_name == "Google":
        return "Google Translate Online"
    else:
        return httpx.get(f'https://huggingface.co/{model_name}/raw/main/README.md').text

def create_interface():
    with gr.Blocks() as interface:
        gr.Markdown("### Machine Text Translation with Gradio API and MCP Server")
        input_text = gr.Textbox(label="Enter text to translate:", placeholder="Type your text here, maximum 512 tokens", autofocus=True, submit_btn='Translate', max_length=512)

        with gr.Row(variant="compact"):
            s_language = gr.Dropdown(choices=langs, value=DEFAULTS[0], label="Source language", interactive=True, scale=2)
            t_language = gr.Dropdown(choices=langs, value=DEFAULTS[1], label="Target language", interactive=True, scale=2)
            swap_btn = gr.Button("Swap Languages", size="md", scale=1)
        swap_btn.click(fn=swap_languages, inputs=[s_language, t_language], outputs=[s_language, t_language], api_name=False, show_api=False)
        model_name = gr.Dropdown(choices=models, label=f"Select a model. Default is {DEFAULTS[2]}.", value=DEFAULTS[2], interactive=True, scale=2)

        translated_text = gr.Textbox(label="Translated text:", placeholder="Display field for translation", interactive=False, show_copy_button=True, lines=2)
        message_text = gr.Textbox(label="Messages:", placeholder="Display field for status and error messages", interactive=False,
                                  value=f'Default translation settings: from {s_language.value} to {t_language.value} with {model_name.value}.')
        allmodels = gr.HTML(label="Model links:", value=', '.join([f'<a href="https://huggingface.co/{model}">{model}</a>' for model in models]))
        model_info = gr.Markdown(label="Model info:", value=get_info(DEFAULTS[2], DEFAULTS[0], DEFAULTS[1]), show_copy_button=True)
        model_name.change(fn=get_info, inputs=[model_name, s_language, t_language], outputs=model_info, api_name=False, show_api=False)

        # Submitting the input textbox runs the translation
        input_text.submit(
            fn=translate_text,
            inputs=[input_text, s_language, t_language, model_name],
            outputs=[translated_text, message_text]
        )

    return interface

interface = create_interface()
if __name__ == "__main__":
    # mcp_server=True also exposes translate_text as an MCP tool
    interface.launch(mcp_server=True)
    # Alternative: interface.queue().launch(server_name="0.0.0.0", show_error=True, server_port=7860, mcp_server=True)