LLMFluencyModel.predict:v0
Version 0
import weave
from litellm import acompletion
from pydantic import BaseModel, Field


class FluencyResponse(BaseModel):
    response_fluency: int = Field(description="The fluency of the response, 1 if it is fluent, 0 if it is not.")
    non_fluency_reason: str = Field(description="The reason for the fluency score, if the response is not fluent.")


class LLMFluencyModel(weave.Model):
    # Prompt templates are model attributes; user_prompt must contain a {text} placeholder.
    system_prompt: str
    user_prompt: str

    @weave.op()
    async def predict(self, text: str) -> dict:
        # Ask the LLM to grade fluency, constraining the reply to the FluencyResponse schema.
        response = await acompletion(
            model="openai/o1-mini",
            messages=[
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": self.user_prompt.format(text=text)},
            ],
            response_format=FluencyResponse,
        )
        # The structured output arrives as a JSON string in message.content.
        message = response.choices[0].message
        out = FluencyResponse.model_validate_json(message.content)
        # Flag the text when it is *not* fluent, and surface the model's reason.
        return {"flagged": out.response_fluency == 0, "extras": {"reason": out.non_fluency_reason}}