# LLMFluencyModel.predict:v3
# Version: 3
# Last updated / Last updated by: (not captured in this export)
# Calls: 22 logged calls (1-22) at time of export
import weave
from litellm.main import acompletion
from pydantic.main import BaseModel
from pydantic.fields import Field
class FluencyResponse(BaseModel):
    """Structured verdict returned by the LLM fluency judge.

    Both fields are required; the judge fills them when the model is used
    as a `response_format` schema and the raw JSON is validated with
    `model_validate_json`.
    """

    # Binary fluency verdict from the judge (see description for encoding).
    response_fluency: int = Field(..., description="The fluency of the response, 1 if it is fluent, 0 if it is not.")
    # Judge's explanation; meaningful only for a non-fluent verdict.
    non_fluency_reason: str = Field(..., description="The reason for the fluency score, if the response is not fluent.")
@weave.op()
async def predict(self, text: str) -> dict:
    """Judge *text* for fluency with an LLM and return a guardrail verdict.

    Sends `self.system_prompt` and `self.user_prompt.format(text=text)` to
    Claude 3.5 Sonnet via litellm, asking for a `FluencyResponse`-shaped JSON
    reply, then parses it.

    Args:
        text: The response text to evaluate.

    Returns:
        dict with:
            "flagged": True when the judge scored the text as NOT fluent
                (response_fluency == 0) — i.e. the guardrail fires on
                non-fluent text.
            "extras": {"reason": the judge's explanation for a non-fluent
                score; expected to be empty/irrelevant when not flagged}.

    Raises:
        pydantic.ValidationError: if the model reply is not valid
            `FluencyResponse` JSON.
    """
    response = await acompletion(
        model="anthropic/claude-3-5-sonnet-20240620",
        messages=[
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": self.user_prompt.format(text=text)},
        ],
        response_format=FluencyResponse,
    )
    out = FluencyResponse.model_validate_json(response.choices[0].message.content)
    # BUG FIX: the original returned flagged=True for response_fluency == 1
    # (fluent) while attaching the *non*-fluency reason — internally
    # inconsistent. A guardrail should flag the bad case: not fluent (== 0).
    return {"flagged": out.response_fluency == 0, "extras": {"reason": out.non_fluency_reason}}