Skip to content

Commit bad6972

Browse files
committed
Replace WatsonX TypeScript Jupyter notebook with a Node.js module
1 parent fac3f09 commit bad6972

File tree

10 files changed

+1763
-343
lines changed

10 files changed

+1763
-343
lines changed
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
# Get your credentials at dataplatform.cloud.ibm.com:
2+
# - Under developer access, select the default project
3+
# - Get the project ID
4+
# - Get the watsonx.ai URL
5+
# - Create an API key
6+
WATSONX_PROJECT_ID = "project_id"
7+
WATSONX_URL = "https://region.ml.cloud.ibm.com"
8+
WATSONX_API_KEY = "your_api_key"
9+
10+
# Get your API key at e2b.dev:
11+
E2B_API_KEY = "your_api_key"
Lines changed: 111 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
// Code interpreter with IBM WatsonX AI Granite
// This example uses the [E2B SDK](https://github.com/e2b-dev/code-interpreter) as a code interpreter for [IBM Granite Code](https://www.ibm.com/granite/docs/models/code/) on [IBM WatsonX](https://www.ibm.com/watsonx).
// The code generated by the LLM runs in an [E2B secure cloud sandbox](https://e2b.dev/docs/sandbox/overview).

// 1. Imports and API keys
// You need to get your API keys and save them in .env file. You can copy and rename the .env.template file. We import all necessary libraries.

import { config } from "dotenv";
import { WatsonXAI } from "@ibm-cloud/watsonx-ai";
import { IamAuthenticator } from "ibm-cloud-sdk-core";
import { Sandbox } from "@e2b/code-interpreter";
import fs from "fs";

// Load environment variables from .env file
config({ path: "./.env" });

// 2. Initialize WatsonX AI Client
// We initialize the WatsonX AI client using the credentials stored in the environment variables.
// This client will be used to interact with the IBM WatsonX Granite model.

// Environment variables
const WATSONX_PROJECT_ID = process.env.WATSONX_PROJECT_ID;
const WATSONX_URL = process.env.WATSONX_URL || "https://us-south.ml.cloud.ibm.com";
const WATSONX_API_KEY = process.env.WATSONX_API_KEY;

// Fail fast with an actionable message instead of an opaque auth/API error later.
if (!WATSONX_PROJECT_ID || !WATSONX_API_KEY) {
  throw new Error("WATSONX_PROJECT_ID and WATSONX_API_KEY must be set — see .env.template");
}

// Initialize WatsonX client
const watsonxAIService = WatsonXAI.newInstance({
  version: "2024-05-31",
  serviceUrl: WATSONX_URL,
  authenticator: new IamAuthenticator({ apikey: WATSONX_API_KEY }),
});

// Parameters for the text generation request.
// NOTE(review): the original wrote `WATSONX_PROJECT_ID!` — a TypeScript
// non-null assertion, which is a syntax error in plain JavaScript. The
// guard above guarantees the value is set, so no assertion is needed.
const params = {
  modelId: "ibm/granite-34b-code-instruct",
  projectId: WATSONX_PROJECT_ID,
  parameters: {
    max_new_tokens: 1024,
  },
};

console.log("WatsonX service initialized");

// 3. Configure System Prompt and Generate a Response
// We define a system prompt that instructs the model on how to behave. Then, we send a user query to the model and retrieve a response.

// System prompt configuration
// Because this is Markdown, the LLM will respond in Markdown
const SYSTEM_PROMPT = `
## Your job & context
You are a python data scientist. You are given tasks to complete and you run Python code to solve them.
- The Python code runs in Jupyter notebook.
- You have access to the internet and can make API requests.
- You also have access to the filesystem and can read/write files.
- You can install any pip package but the usual packages for data analysis are already preinstalled.
`;

// Specify the prompt
const userMessage = "Plot a 3D chart of sin x cos y.";
console.log(`User Message: ${userMessage}`);

const prompt = SYSTEM_PROMPT + userMessage + "\n";

// Generate response using the Watson SDK
console.log("Generating response from WatsonX...");
const response = await watsonxAIService.generateText({ input: prompt, ...params });
const content = response.result.results[0].generated_text;

console.log(`Model response:`);
console.log(`${"=".repeat(50)}\n${content}\n${"=".repeat(50)}`);

// 4. Extract and Display AI-Generated Code
// The AI-generated response may include Markdown formatting. We extract the code block from the response using regular expressions.

// Extract fenced code blocks (```python ... ``` or bare ``` ... ```);
// fall back to the whole response when no fence is present.
const pattern = /```(?:python)?[\n\r](.*?)```/gs;
const results = Array.from(content.matchAll(pattern));
const codeBlock = results.length ? results.map(match => match[1]).join("\n") : content;

// Print the code block
console.log(`AI-generated code:`);
console.log(`${"=".repeat(50)}\n${codeBlock}\n${"=".repeat(50)}`);

// 5. Execute Code in an E2B Code Interpreter
// We run the AI-generated code in a secure E2B sandbox environment, capturing its output, errors, and runtime exceptions.

// Run the code in an E2B code interpreter. The sandbox is killed in
// `finally` so a failed execution does not leak a running cloud sandbox.
const codeInterpreter = await Sandbox.create();
try {
  const execution = await codeInterpreter.runCode(codeBlock);

  console.log("Stdout:", execution.logs.stdout);
  console.log("Stderr:", execution.logs.stderr);
  console.log("Python runtime error:", execution.error ?? "None");

  // 6. Display Generated Figures
  // If the AI-generated code produces visualizations, we retrieve and save them.
  // Keep only results that actually carry PNG data: text-only results have no
  // `png` property, and Buffer.from(undefined, 'base64') would throw.
  const figures = execution.results
    .filter(result => result.png)
    .map(result => Buffer.from(result.png, "base64"));

  console.log(`Generated ${figures.length} figures`);
  if (figures.length > 0) {
    // Save the first figure (the original crashed here when no figure was produced).
    fs.writeFileSync("figure.png", figures[0]);
  }
} finally {
  // 7. Terminate the Code Interpreter
  // Safely terminate the E2B code interpreter to free up resources, even on error.
  await codeInterpreter.kill();
}
Loading

0 commit comments

Comments
 (0)