OpenaiFluencyModel.predict:v0
Version: 0
import weave

# `predict` is a method of the OpenaiFluencyModel class; `openai_client`
# (an AsyncOpenAI instance) and the FluencyResponse Pydantic model are
# defined outside the captured snippet.
@weave.op()
async def predict(self, text: str):
    # Request a completion parsed into the FluencyResponse schema via the
    # OpenAI structured-outputs API.
    response = await openai_client.beta.chat.completions.parse(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": self.user_prompt.format(text=text)},
        ],
        response_format=FluencyResponse,
    )
    message = response.choices[0].message
    # Return the parsed result as a plain dict, or None if the reply could
    # not be parsed into the schema (e.g. a refusal or malformed output).
    if message.parsed:
        return message.parsed.model_dump()
    else:
        return None
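
The op above leans on names defined elsewhere in the project. Below is a minimal runnable sketch of that surrounding context; the FluencyResponse fields, the prompts, the project name, and the default AsyncOpenAI client are assumptions for illustration, not the recorded values:

import asyncio

import weave
from openai import AsyncOpenAI
from pydantic import BaseModel

openai_client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment


# Assumed structured-output schema; the recorded field names may differ.
class FluencyResponse(BaseModel):
    is_fluent: bool
    reasoning: str


# Assumed enclosing model class; the op above is captured from its
# predict method. The prompt defaults are placeholders.
class OpenaiFluencyModel(weave.Model):
    system_prompt: str = "You judge whether a passage of text reads fluently."
    user_prompt: str = "Assess the fluency of the following text:\n{text}"

    @weave.op()
    async def predict(self, text: str):
        response = await openai_client.beta.chat.completions.parse(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": self.user_prompt.format(text=text)},
            ],
            response_format=FluencyResponse,
        )
        message = response.choices[0].message
        return message.parsed.model_dump() if message.parsed else None


weave.init("fluency-eval")  # placeholder project name
model = OpenaiFluencyModel()
print(asyncio.run(model.predict("The cat sat on the mat.")))

With a project initialized, Weave traces each invocation under the name OpenaiFluencyModel.predict and versions the method's source, which is how the OpenaiFluencyModel.predict:v0 op shown on this page is produced.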