OpenaiFluencyModel.predict:v1
Version
1
Last updated
Last updated by
Calls:
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
import weave
@weave.op()
async def predict(self, text: str):
    """Score *text* for fluency using a structured OpenAI chat completion.

    Sends ``self.system_prompt`` plus ``self.user_prompt`` (with *text*
    substituted) to ``gpt-4o-mini`` and parses the reply into a
    ``FluencyResponse`` model.

    Returns:
        A dict ``{"flagged": bool, "extras": {"reason": ...}}`` where
        ``flagged`` is True when the parsed ``response_fluency`` equals 0,
        or ``None`` when the model output could not be parsed into
        ``FluencyResponse``.
    """
    completion = await openai_client.beta.chat.completions.parse(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": self.user_prompt.format(text=text)},
        ],
        response_format=FluencyResponse,
    )

    # Guard clause: bail out early when structured parsing failed
    # (e.g. the model refused or produced non-conforming output).
    parsed = completion.choices[0].message.parsed
    if not parsed:
        return None

    scores = parsed.model_dump()
    return {
        "flagged": scores["response_fluency"] == 0,
        "extras": {"reason": scores["non_fluency_reason"]},
    }