EvaluatorModel.predict:v1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import weave
import json
# Resolve the published RAG engine object from Weave artifact storage by its
# stored reference name. NOTE(review): this runs at import time, so importing
# this module requires Weave storage to be reachable; the returned object is
# presumably an engine exposing an async `agenerate(question)` API (it is
# awaited below) — confirm against the published artifact.
rag_engine = weave.storage.artifact_path_ref('rag_engine').get()
@weave.op()
async def get_eval_record(
    question: str,
) -> dict:
    """Ask the RAG engine *question* and shape its JSON reply into a flat
    evaluation record.

    Returns a dict with three keys: the generated answer text, the first
    highlight snippet of each retrieved source, and the engine's
    answer-in-context flag.
    """
    raw = await rag_engine.agenerate(question)
    payload = json.loads(raw)
    # Keep only the leading highlight from each retrieved source entry.
    contexts = []
    for src in payload["sources"]:
        contexts.append(src["highlights"][0])
    return {
        "generated_answer": payload["answer"],
        "retrieved_contexts": contexts,
        "answer_in_context": payload["answerInContext"],
    }
@weave.op()
async def predict(self, question: str) -> dict:
# Model logic goes here
prediction = await get_eval_record(question)