TranslatorModel.predict:v0
import weave
import litellm


class TranslatorModel(weave.Model):
    # Attributes referenced as self.model and self.temperature in predict;
    # weave.Model is assumed as the base class so they become tracked fields.
    model: str
    temperature: float

    @weave.op()
    def predict(self, text: str, target_language: str):
        # Ask the configured LLM (via LiteLLM) to translate the input text.
        response = litellm.completion(
            model=self.model,
            messages=[
                {"role": "system", "content": f"You are a translator. Translate the given text to {target_language}."},
                {"role": "user", "content": text},
            ],
            max_tokens=1024,
            temperature=self.temperature,
        )
        # Return only the translated text from the first completion choice.
        return response.choices[0].message.content
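
A minimal usage sketch for the class above: the project name, model identifier, and temperature are placeholder values not taken from the original snippet, and any provider/model combination supported by LiteLLM could be substituted.

import weave

# Placeholder project name; weave.init enables tracing for the @weave.op-decorated predict call.
weave.init("translation-demo")

# Placeholder model id and temperature, passed through to litellm.completion.
translator = TranslatorModel(model="gpt-4o-mini", temperature=0.3)

# Runs the completion and logs the call (inputs, output, latency) to Weave.
print(translator.predict(text="Hello, how are you?", target_language="French"))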