“저는 여러 가지 이유로 Weave를 좋아합니다. 이미 잘 사용 중인 Weights & Biases에서 우리 제품의 GenAI 부분에 대한 수많은 정보를 얻기 위해 단 한 줄만 추가하면 된다는 점이 가장 큰 장점입니다. AI의 성능 관련해서 관찰하고 있는 모든 것들을 이제 Weave를 통해 빠르고 쉽게 보고할 수 있습니다.”
Mike Maloney, Co-founder and CDO
Neuralift
import openai
import weave

# Initialize Weave with your project name — one line is enough to start tracing.
weave.init("weave-intro")


@weave.op
def correct_grammar(user_input):
    """Send *user_input* to the model with a grammar-correction prompt.

    Returns the model's reply with surrounding whitespace stripped.
    """
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="o1-mini",
        messages=[
            {
                "role": "user",
                "content": "Correct the grammar:\n\n" + user_input,
            }
        ],
    )
    return response.choices[0].message.content.strip()


result = correct_grammar("That was peace of cake!")
print(result)
import weave
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# Initialize Weave with your project name
weave.init("langchain_demo")

llm = ChatOpenAI()
prompt = PromptTemplate.from_template("1 + {number} = ")

# Compose prompt and model into a runnable chain (LCEL pipe syntax);
# Weave traces the invocation automatically.
llm_chain = prompt | llm

output = llm_chain.invoke({"number": 2})
print(output)
import weave
from llama_index.core.chat_engine import SimpleChatEngine

# Initialize Weave with your project name
weave.init("llamaindex_demo")

# Default chat engine; Weave traces the chat call automatically.
chat_engine = SimpleChatEngine.from_defaults()
response = chat_engine.chat(
    "Say something profound and romantic about fourth of July"
)
print(response)
import wandb

# 1. Start a new run
run = wandb.init(project="gpt5")

# 2. Save model inputs and hyperparameters
config = run.config
config.dropout = 0.01

# 3. Log gradients and model parameters
# NOTE(review): `model`, `train_loader`, `args`, and `loss` are assumed to be
# defined by the surrounding training script — this is an excerpt.
run.watch(model)
for batch_idx, (data, target) in enumerate(train_loader):
    ...
    if batch_idx % args.log_interval == 0:
        # 4. Log metrics to visualize performance
        run.log({"loss": loss})
import wandb

# 1. Define which wandb project to log to and name your run
# (the wandb.init keyword for the run name is `name`, not `run_name`)
run = wandb.init(project="gpt-5", name="gpt-5-base-high-lr")

# 2. Add wandb in your `TrainingArguments`
args = TrainingArguments(..., report_to="wandb")

# 3. W&B logging will begin automatically when you start training your Trainer
trainer = Trainer(..., args=args)
trainer.train()
from lightning.pytorch.loggers import WandbLogger

# Initialise the logger
wandb_logger = WandbLogger(project="llama-4-fine-tune")

# Add configs such as batch size etc. to the wandb config
wandb_logger.experiment.config["batch_size"] = batch_size

# Pass wandb_logger to the Trainer
trainer = Trainer(..., logger=wandb_logger)

# Train the model
trainer.fit(...)
import wandb

# 1. Start a new run
run = wandb.init(project="gpt4")

# 2. Save model inputs and hyperparameters
config = wandb.config
config.learning_rate = 0.01

# Model training here

# 3. Log metrics to visualize performance over time
# NOTE(review): tf.Session is TensorFlow 1.x API — this excerpt assumes TF1.
with tf.Session() as sess:
    # ...
    wandb.tensorflow.log(tf.summary.merge_all())
import wandb
from wandb.keras import (
    WandbMetricsLogger,
    WandbModelCheckpoint,
)

# 1. Start a new run
run = wandb.init(project="gpt-4")

# 2. Save model inputs and hyperparameters
config = wandb.config
config.learning_rate = 0.01

...  # Define a model

# 3. Log layer dimensions and metrics
wandb_callbacks = [
    WandbMetricsLogger(log_freq=5),
    WandbModelCheckpoint("models"),
]
model.fit(
    X_train,
    y_train,
    validation_data=(X_test, y_test),
    callbacks=wandb_callbacks,
)
import wandb
import xgboost  # required: xgboost.train is called below but was never imported
from wandb.xgboost import wandb_callback

# 1. Start a new run
run = wandb.init(project="visualize-models")

# 2. Add the callback
# NOTE(review): `param`, `xg_train`, `num_round`, `watchlist`, and `xg_test`
# come from the surrounding script — this is an excerpt.
bst = xgboost.train(
    param, xg_train, num_round, watchlist, callbacks=[wandb_callback()]
)

# Get predictions
pred = bst.predict(xg_test)