OpenaiFluencyModel.predict:v2
Version: 2
Last updated: (not captured in this export)
Last updated by: (not captured in this export)
Calls: (count not captured in this export)

(NOTE: the numeric column "1" through "25" that followed here was the
source line-number gutter from the Weave UI code panel — extraction
residue, not content. The actual code begins below.)
import weave
from pydantic.main import BaseModel
from pydantic.fields import Field
# NOTE(review): this is the *string repr* of a serialized AsyncOpenAI client,
# captured when this op was exported by Weave — it is a placeholder, not a
# usable client. `predict` calls `openai_client.beta.chat.completions.parse(...)`
# on it, which would raise AttributeError as-is; replace with a real
# `openai.AsyncOpenAI(...)` instance (API key from the environment) before running.
openai_client = "<openai.AsyncOpenAI object at 0x172a0fc20>"
# Structured-output schema for the fluency judgment. Passed to the OpenAI
# call as `response_format=FluencyResponse`, so the Field descriptions below
# are part of the JSON schema the model is instructed to fill — do not edit
# them casually. (Indentation restored; the export had stripped it.)
class FluencyResponse(BaseModel):
    # 1 if the judged text is fluent, 0 if it is not (per the description sent to the model).
    response_fluency: int = Field(description="The fluency of the response, 1 if it is fluent, 0 if it is not.")
    # Explanation for the score; per its description, meaningful when the text is not fluent.
    non_fluency_reason: str = Field(description="The reason for the fluency score, if the response is not fluent.")
@weave.op()
async def predict(self, text: str):
response = await openai_client.beta.chat.completions.parse(
model="gpt-4o",
messages=[
{"role": "system", "content": self.system_prompt},
{"role": "user", "content": self.user_prompt.format(text=text)}
],
response_format=FluencyResponse,
)
message = response.choices[0].message
if message.parsed:
score_response = message.parsed.model_dump()
return {"flagged": score_response["response_fluency"] == 1, "extras": {"reason": score_response["non_fluency_reason"]}}
else: