2. Create a script for a simple Langchain application (JavaScript)
Make sure to replace the placeholders with your actual API keys and the dataset ID you created in the previous step.
Create a file named langchain-app.js and add the following code:
langchain-app.js
const { Hamming, HammingCallbackHandler } = require("@hamming/hamming-sdk");
const { ChatOpenAI } = require("@langchain/openai");
const { ChatPromptTemplate } = require("@langchain/core/prompts");

// Setup OpenAI API Key to be used by the Langchain LLM call
process.env.OPENAI_API_KEY = "<your-openai-key>";
const HAMMING_API_KEY = "<your-secret-key>";

const hamming = new Hamming({ apiKey: HAMMING_API_KEY });
// Setup Hamming callback to be used by the Langchain chain
const cb = new HammingCallbackHandler(hamming);

async function run() {
  const model = new ChatOpenAI({ model: "gpt-4-turbo" });

  const systemTemplate = "Translate the following into {language}:";
  const promptTemplate = ChatPromptTemplate.fromMessages([
    ["system", systemTemplate],
    ["user", "{text}"],
  ]);

  const chain = promptTemplate.pipe(model);
  const result = await chain.invoke(
    { language: "italian", text: "hi" },
    { callbacks: [cb] } // Pass the Hamming callback into the chain
  );
  console.log(result.content);
}

run().catch(console.error);
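If you prefer not to hard-code the keys in the script, you can read them from environment variables instead. This is a minimal sketch; it assumes you export OPENAI_API_KEY and HAMMING_API_KEY in your shell before running the script:

// Optional: read the keys from environment variables instead of hard-coding them.
// Assumes OPENAI_API_KEY and HAMMING_API_KEY are set in the shell before running.
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const HAMMING_API_KEY = process.env.HAMMING_API_KEY;

if (!OPENAI_API_KEY || !HAMMING_API_KEY) {
  throw new Error("Set OPENAI_API_KEY and HAMMING_API_KEY before running this script.");
}

// ChatOpenAI picks up OPENAI_API_KEY from the environment automatically,
// so only the Hamming client needs the key passed in explicitly.
const hamming = new Hamming({ apiKey: HAMMING_API_KEY });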
3. Run the Langchain application script (JavaScript)
Install dependencies:
npm install @hamming/hamming-sdk langchain @langchain/openai @langchain/core
Run the script by executing the following command in your terminal:
node langchain-app.js
This will create a monitoring item in Hamming with the corresponding LLM trace. You can view the item on the Monitoring page.
2. Create a script for a simple Langchain application (Python)
Create a file named langchain-app.py and add the following code:
langchain-app.py
import os
from operator import itemgetter

from hamming import ClientOptions, Hamming, LangchainCallbackHandler
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

HAMMING_API_KEY = "<your-secret-key>"
# Setup OpenAI API Key to be used by the Langchain LLM call
os.environ["OPENAI_API_KEY"] = "<your-openai-key>"

hamming = Hamming(ClientOptions(api_key=HAMMING_API_KEY))
# Setup Hamming callback to be used by the Langchain chain
callback_handler = LangchainCallbackHandler(hamming)

def run():
    prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?")
    prompt2 = ChatPromptTemplate.from_template(
        "what country is the city {city} in? respond in {language}"
    )
    model = ChatOpenAI()

    chain1 = prompt1 | model | StrOutputParser()
    chain2 = (
        {"city": chain1, "language": itemgetter("language")}
        | prompt2
        | model
        | StrOutputParser()
    )

    chain2.invoke(
        {"person": "trudeau", "language": "french"},
        config={"callbacks": [callback_handler]},  # Pass the Hamming callback into the chain
    )

if __name__ == "__main__":
    run()
3. Run the Langchain application script (Python)
Install dependencies:
pip install langchain langchain_openai
Run the script by executing the following command in your terminal:
python langchain-app.py
This will create a monitoring item in Hamming with the corresponding LLM trace. You can view the item in the Monitoring dashboard.