Node.js Examples
Examples using Node.js to interact with the Scrub API.
Using the OpenAI SDK (Recommended)
The easiest way to use Scrub from Node.js is with the official OpenAI SDK:
npm install openai
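All of the examples below read the key from the SCRUB_API_KEY environment variable. An optional guard like the following fails fast with a clear message if the variable is missing:

// Optional: fail fast if the API key the examples rely on is not set
if (!process.env.SCRUB_API_KEY) {
  throw new Error('SCRUB_API_KEY is not set');
}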
Basic Request
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.SCRUB_API_KEY,
  baseURL: 'https://api.scrub.health/v1',
});

async function main() {
  const response = await client.chat.completions.create({
    model: 'gpt-4',
    messages: [
      { role: 'user', content: 'What are common symptoms of the flu?' }
    ],
  });

  console.log(response.choices[0].message.content);
}

main();
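Note: the examples use ES module syntax (import). Run them from a file with the .mjs extension or add "type": "module" to your package.json.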
With System Message
const response = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [
    { role: 'system', content: 'You are a helpful healthcare assistant.' },
    { role: 'user', content: 'What are the risk factors for heart disease?' }
  ],
  temperature: 0.7,
  max_tokens: 500,
});

console.log(response.choices[0].message.content);
Streaming Response
const stream = await client.chat.completions.create({
  model: 'gpt-4',
  messages: [
    { role: 'user', content: 'Explain the importance of regular exercise.' }
  ],
  stream: true,
});

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content;
  if (content) {
    process.stdout.write(content);
  }
}
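If you also need the complete reply once streaming finishes (for logging or to append it to a conversation history), accumulate the chunks as they arrive. A minimal variation of the loop above:

let fullText = '';

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content;
  if (content) {
    fullText += content;           // collect the complete reply
    process.stdout.write(content); // still print as it streams
  }
}

console.log(`\n[received ${fullText.length} characters]`);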
Using Different Providers
// Using Claude
const claudeResponse = await client.chat.completions.create({
  model: 'claude-3-sonnet',
  messages: [{ role: 'user', content: 'Hello!' }],
  max_tokens: 500,
});

// Using Gemini
const geminiResponse = await client.chat.completions.create({
  model: 'gemini-1.5-pro',
  messages: [{ role: 'user', content: 'Hello!' }],
});
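Because every provider sits behind the same endpoint, you can send one prompt to several models and compare the answers. A small sketch (the askAll helper is ours; the model IDs are the ones used above):

async function askAll(prompt, models = ['gpt-4', 'claude-3-sonnet', 'gemini-1.5-pro']) {
  // Fire the requests in parallel, one per model
  const responses = await Promise.all(
    models.map((model) =>
      client.chat.completions.create({
        model,
        messages: [{ role: 'user', content: prompt }],
        max_tokens: 500,
      })
    )
  );

  return responses.map((response, i) => ({
    model: models[i],
    answer: response.choices[0].message.content,
  }));
}

// Usage
const answers = await askAll('What is a healthy resting heart rate?');
for (const { model, answer } of answers) {
  console.log(`--- ${model} ---\n${answer}\n`);
}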
Using Fetch API
If you prefer not to use the OpenAI SDK, you can call the REST endpoint directly with the fetch API built into Node.js 18 and later:
async function chatCompletion(messages, model = 'gpt-4') {
  const response = await fetch('https://api.scrub.health/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${process.env.SCRUB_API_KEY}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      model,
      messages,
    }),
  });

  if (!response.ok) {
    throw new Error(`API error: ${response.status}`);
  }

  return response.json();
}

// Usage
const result = await chatCompletion([
  { role: 'user', content: 'What is preventive care?' }
]);

console.log(result.choices[0].message.content);
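Because this is a plain fetch call, you can add a client-side timeout with AbortController (built into Node.js 18+). A sketch using a hypothetical chatCompletionWithTimeout helper:

async function chatCompletionWithTimeout(messages, model = 'gpt-4', timeoutMs = 30_000) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);

  try {
    const response = await fetch('https://api.scrub.health/v1/chat/completions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${process.env.SCRUB_API_KEY}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({ model, messages }),
      signal: controller.signal, // abort the request if the timer fires
    });

    if (!response.ok) {
      throw new Error(`API error: ${response.status}`);
    }

    return await response.json();
  } finally {
    clearTimeout(timer); // always clear the timer, even on errors
  }
}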
Error Handling
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.SCRUB_API_KEY,
  baseURL: 'https://api.scrub.health/v1',
});

async function safeChatCompletion(messages) {
  try {
    const response = await client.chat.completions.create({
      model: 'gpt-4',
      messages,
    });
    return response.choices[0].message.content;
  } catch (error) {
    if (error instanceof OpenAI.RateLimitError) {
      console.error('Rate limit exceeded. Please wait and retry.');
    } else if (error instanceof OpenAI.APIConnectionError) {
      console.error('Failed to connect to Scrub API.');
    } else if (error instanceof OpenAI.APIError) {
      console.error(`API error: ${error.message}`);

      // Check for PHI blocked error
      if (error.message.includes('phi_blocked')) {
        console.error('Request was blocked due to PHI detection.');
      }
    } else {
      throw error;
    }
  }
}
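For transient failures such as rate limits, a simple retry with exponential backoff usually suffices. A sketch (the chatWithRetry helper and its delays are illustrative; the SDK also has its own maxRetries client option for automatic retries):

async function chatWithRetry(messages, maxAttempts = 3) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      const response = await client.chat.completions.create({
        model: 'gpt-4',
        messages,
      });
      return response.choices[0].message.content;
    } catch (error) {
      const retryable =
        error instanceof OpenAI.RateLimitError ||
        error instanceof OpenAI.APIConnectionError;

      if (!retryable || attempt === maxAttempts) {
        throw error; // give up on non-retryable errors or after the last attempt
      }

      const delayMs = 1000 * 2 ** (attempt - 1); // 1s, 2s, 4s, ...
      console.warn(`Attempt ${attempt} failed, retrying in ${delayMs} ms...`);
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
}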
TypeScript Example
import OpenAI from 'openai';
import type { ChatCompletionMessageParam } from 'openai/resources/chat';

const client = new OpenAI({
  apiKey: process.env.SCRUB_API_KEY,
  baseURL: 'https://api.scrub.health/v1',
});

interface ChatResponse {
  content: string;
  model: string;
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
}

async function chat(messages: ChatCompletionMessageParam[]): Promise<ChatResponse> {
  const response = await client.chat.completions.create({
    model: 'gpt-4',
    messages,
  });

  return {
    content: response.choices[0].message.content ?? '',
    model: response.model,
    usage: {
      promptTokens: response.usage?.prompt_tokens ?? 0,
      completionTokens: response.usage?.completion_tokens ?? 0,
      totalTokens: response.usage?.total_tokens ?? 0,
    },
  };
}

// Usage
const result = await chat([
  { role: 'system', content: 'You are a helpful healthcare assistant.' },
  { role: 'user', content: 'What is hypertension?' },
]);

console.log(result.content);
Express.js Integration
import express from 'express';
import OpenAI from 'openai';

const app = express();
app.use(express.json());

const client = new OpenAI({
  apiKey: process.env.SCRUB_API_KEY,
  baseURL: 'https://api.scrub.health/v1',
});

app.post('/api/chat', async (req, res) => {
  try {
    const { message } = req.body;

    const response = await client.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: 'You are a helpful healthcare assistant.' },
        { role: 'user', content: message },
      ],
    });

    res.json({
      reply: response.choices[0].message.content,
    });
  } catch (error) {
    console.error('Chat error:', error);
    res.status(500).json({ error: 'Failed to process request' });
  }
});

app.listen(3000, () => {
  console.log('Server running on port 3000');
});
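With the server running, you can exercise the route from another Node.js process (or any HTTP client):

// Quick test client for the /api/chat route above (assumes the server is running locally)
const res = await fetch('http://localhost:3000/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ message: 'What is preventive care?' }),
});

const { reply } = await res.json();
console.log(reply);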
Complete Example
/**
 * Scrub API Node.js Example
 * Healthcare chatbot with PHI protection
 */
import OpenAI from 'openai';
import readline from 'readline';

const client = new OpenAI({
  apiKey: process.env.SCRUB_API_KEY,
  baseURL: 'https://api.scrub.health/v1',
});

// Full exchange so far, so the model has context on every turn
const conversationHistory = [];

async function chat(userMessage) {
  conversationHistory.push({ role: 'user', content: userMessage });

  const response = await client.chat.completions.create({
    model: 'gpt-4',
    messages: [
      { role: 'system', content: 'You are a helpful healthcare assistant.' },
      ...conversationHistory,
    ],
    temperature: 0.7,
  });

  const assistantMessage = response.choices[0].message.content;
  conversationHistory.push({ role: 'assistant', content: assistantMessage });

  return assistantMessage;
}

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});

console.log('Healthcare Assistant (type "quit" to exit)');
console.log('-'.repeat(40));

// Read one line of user input, answer it, then prompt again
function prompt() {
  rl.question('\nYou: ', async (input) => {
    const trimmed = input.trim();

    if (trimmed.toLowerCase() === 'quit') {
      rl.close();
      return;
    }

    try {
      const response = await chat(trimmed);
      console.log(`\nAssistant: ${response}`);
    } catch (error) {
      console.error('Error:', error.message);
    }

    prompt();
  });
}

prompt();