Monitoring
Track the performance of your AI application in real-time.
Before you begin
Follow the Setting up guide to make sure you have access to the Hamming dashboard and have created a secret key.
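The quickstarts below hardcode keys for brevity; in practice you would load them from the environment. A minimal sketch in TypeScript, assuming the keys are exported as HAMMING_API_KEY and OPENAI_API_KEY (the variable names are illustrative, not required by either SDK):

import { Hamming } from "@hamming/hamming-sdk";
import { OpenAI } from "openai";

// Illustrative variable names; neither SDK mandates them.
const hammingKey = process.env.HAMMING_API_KEY;
const openaiKey = process.env.OPENAI_API_KEY;
if (!hammingKey || !openaiKey) {
  throw new Error("Set HAMMING_API_KEY and OPENAI_API_KEY before running.");
}

const hamming = new Hamming({ apiKey: hammingKey });
const openai = new OpenAI({ apiKey: openaiKey });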
Quickstart - Node.js
Learn how to monitor your AI application with our Hamming TypeScript SDK.
npm install @hamming/hamming-sdk
Create a file named ai-app.ts and add the following code:
import { Hamming } from "@hamming/hamming-sdk";
import { OpenAI } from "openai";

const HAMMING_API_KEY = "<your-secret-key>";
const OPENAI_API_KEY = "<your-openai-key>";

const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const hamming = new Hamming({ apiKey: HAMMING_API_KEY });

// Start background monitoring and grab a tracing handle.
hamming.monitoring.start();
const trace = hamming.tracing;

async function run() {
  const question = "What is the capital of France?";
  console.log("Question:", question);

  // runItem wraps one unit of work in a monitoring item.
  const output = await hamming.monitoring.runItem(async (item) => {
    item.setInput({ question });

    const response = await openai.chat.completions.create({
      model: "gpt-4-turbo",
      messages: [{ role: "user", content: question }],
      max_tokens: 1000,
      temperature: 0.7,
    });
    const answer = response.choices[0].message.content;

    // Attach the LLM call to the monitoring item as a generation trace.
    trace.logGeneration({
      input: question,
      output: answer,
      metadata: {
        provider: "openai",
        model: "gpt-4-turbo",
        max_tokens: 1000,
        temperature: 0.7,
        error: false,
      },
    });

    return { answer };
  });

  console.log("Answer:", output.answer);
}

run().catch(console.error);
Install dependencies:
npm install openai
Run the script by executing the following command in your terminal:
npx tsx ai-app.ts
This will create a monitoring item in Hamming with the corresponding LLM trace. You can view the item on the Monitoring page.
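If the model call fails, you can still record the attempt so it is visible in Hamming. A minimal sketch reusing only the calls shown above; the error: true flag mirrors the error: false metadata field in the quickstart, and the catch-and-log shape here is illustrative rather than prescribed by the SDK:

// Hypothetical error-handling variant of run(); reuses only calls shown above.
async function runWithErrorLogging(question: string) {
  return hamming.monitoring.runItem(async (item) => {
    item.setInput({ question });
    try {
      const response = await openai.chat.completions.create({
        model: "gpt-4-turbo",
        messages: [{ role: "user", content: question }],
      });
      const answer = response.choices[0].message.content;
      trace.logGeneration({
        input: question,
        output: answer,
        metadata: { provider: "openai", model: "gpt-4-turbo", error: false },
      });
      return { answer };
    } catch (err) {
      // Record the failed generation so it also appears on the Monitoring page.
      trace.logGeneration({
        input: question,
        output: String(err),
        metadata: { provider: "openai", model: "gpt-4-turbo", error: true },
      });
      throw err;
    }
  });
}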
Quickstart - Python
Learn how to monitor your AI application with our Hamming Python SDK.
pip install hamming-sdk
Create a file named ai-app.py and add the following code:
from hamming import Hamming, ClientOptions, GenerationParams
from openai import OpenAI

HAMMING_API_KEY = "<your-secret-key>"
OPENAI_API_KEY = "<your-openai-key>"

openai_client = OpenAI(api_key=OPENAI_API_KEY)
hamming = Hamming(ClientOptions(api_key=HAMMING_API_KEY))

# Start background monitoring and grab a tracing handle.
hamming.monitoring.start()
trace = hamming.tracing


def run():
    question = "What is the capital of France?"
    print("Question:", question)

    # start_item() wraps one unit of work in a monitoring item.
    with hamming.monitoring.start_item() as item:
        item.set_input({"question": question})

        response = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": question}],
            max_tokens=1000,
            temperature=0.7,
        )
        answer = response.choices[0].message.content

        # Attach the LLM call to the monitoring item as a generation trace.
        trace.log_generation(
            GenerationParams(
                input=question,
                output=answer,
                metadata=GenerationParams.Metadata(
                    provider="openai",
                    model="gpt-3.5-turbo",
                    max_tokens=1000,
                    temperature=0.7,
                    error=False,
                ),
            ),
        )

        print("Answer:", answer)
        item.set_output({"answer": answer})


if __name__ == "__main__":
    run()
Install dependencies:
pip install openai
Run the script by executing the following command in your terminal:
python ai-app.py
This will create a monitoring item in Hamming with the corresponding LLM trace. You can view the item in the Monitoring dashboard.