Fireworks AI <> Weave Integration
Ultra-brief guide to trace completions from Fireworks AI using W&B Weave
Created on November 22 | Last edited on November 22
Comment
Easy Integration
import weave
from fireworks.client import Fireworks

# Initialize the Weave project so every @weave.op call below is traced.
weave.init("my_awesome_project")

client = Fireworks(api_key="<FIREWORKS_API_KEY>")


@weave.op()
def get_fireworks_completion(query):
    """Send `query` to a Fireworks chat model and return the reply text."""
    response = client.chat.completions.create(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        messages=[{"role": "user", "content": query}],
    )
    return response.choices[0].message.content


print(get_fireworks_completion("Say this is a test"))
Deeper Integration
import weave
from weave import Model
from fireworks.client import Fireworks

# Initialize the Weave project so every @weave.op call below is traced.
weave.init("my_awesome_project")

client = Fireworks(api_key="<FIREWORKS_API_KEY>")


class MyModel(Model):
    """Weave Model wrapping a Fireworks chat-completion endpoint."""

    # Fully-qualified Fireworks model identifier,
    # e.g. "accounts/fireworks/models/llama-v3p1-8b-instruct".
    model_name: str

    @weave.op()
    def infer(self, query: str) -> str:
        """Send `query` to the configured Fireworks model; return the reply text."""
        response = client.chat.completions.create(
            # FIX: the original read a bare `model_name`, which is undefined in
            # this scope and raises NameError at call time; the field is an
            # attribute of the model instance.
            model=self.model_name,
            messages=[{"role": "user", "content": query}],
        )
        return response.choices[0].message.content


# Instantiate the model with a concrete Fireworks endpoint and smoke-test it.
model = MyModel(model_name="accounts/fireworks/models/llama-v3p1-8b-instruct")
print(model.infer("Say this is a test"))
Add a comment