import os

from openai import OpenAI

from hamming import Hamming, ClientOptions, GenerationParams
# Configuration: read keys from the environment so real secrets never land in
# source control. The placeholder defaults keep the original behavior when the
# environment variables are unset.
HAMMING_API_KEY = os.environ.get("HAMMING_API_KEY", "<your-secret-key>")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "<your-openai-key>")

openai_client = OpenAI(api_key=OPENAI_API_KEY)
hamming = Hamming(ClientOptions(api_key=HAMMING_API_KEY))

# Start the monitoring session; `trace` is the handle run() uses to attach
# generation logs to monitored items.
hamming.monitoring.start()
trace = hamming.tracing
def run():
    """Ask the model a single fixed question and record the exchange.

    Wraps the OpenAI chat call in a Hamming monitoring item (input/output
    captured via set_input/set_output) and logs the generation to tracing.
    """
    question = "What is the capital of France?"
    print("Question:", question)
    with hamming.monitoring.start_item() as item:
        item.set_input({"question": question})
        completion = openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": question}],
            max_tokens=1000,
            temperature=0.7,
        )
        answer = completion.choices[0].message.content
        # Log the generation so it appears in Hamming tracing alongside the
        # monitored item; metadata mirrors the request parameters above.
        metadata = GenerationParams.Metadata(
            provider="openai",
            model="gpt-3.5-turbo",
            max_tokens=1000,
            temperature=0.7,
            error=False,
        )
        trace.log_generation(
            GenerationParams(input=question, output=answer, metadata=metadata)
        )
        print("Answer:", answer)
        item.set_output({"answer": answer})
# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    run()