
Python Examples

Examples using Python to interact with the Scrub API.

The easiest way to use Scrub from Python is the official OpenAI SDK, pointed at the Scrub base URL:

pip install openai

Basic Request

from openai import OpenAI
import os

client = OpenAI(
    api_key=os.environ.get("SCRUB_API_KEY"),
    base_url="https://api.scrub.health/v1"
)

response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "What are common symptoms of the flu?"}
    ]
)

print(response.choices[0].message.content)
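
The response object also carries metadata beyond the message text. A small sketch, assuming Scrub populates the standard OpenAI-style usage fields:

# Which model answered and how many tokens were used
print(response.model)
print(response.usage.prompt_tokens, response.usage.completion_tokens)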

With System Message

response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful healthcare assistant."},
        {"role": "user", "content": "What are the risk factors for heart disease?"}
    ],
    temperature=0.7,
    max_tokens=500
)

print(response.choices[0].message.content)

Streaming Response

stream = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "Explain the importance of regular exercise."}
    ],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
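
If you also need the full reply after streaming it, collect the deltas as they arrive. A minimal sketch that replaces the loop above (a stream can only be iterated once):

full_reply = []
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
        full_reply.append(delta)

full_text = "".join(full_reply)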

Using Different Providers

# Using Claude
response = client.chat.completions.create(
    model="claude-3-sonnet",
    messages=[{"role": "user", "content": "Hello!"}],
    max_tokens=500
)

# Using Gemini
response = client.chat.completions.create(
    model="gemini-1.5-pro",
    messages=[{"role": "user", "content": "Hello!"}]
)
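
Because every provider is reached through the same client and endpoint, comparing models is just a loop. A minimal sketch, assuming the three model names above are enabled for your key:

# Send the same prompt to each model
for model in ["gpt-4", "claude-3-sonnet", "gemini-1.5-pro"]:
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Summarize the benefits of regular checkups in one sentence."}],
        max_tokens=200
    )
    print(f"{model}: {response.choices[0].message.content}")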

Using Requests Library

If you prefer not to use the OpenAI SDK, you can call the REST endpoint directly with requests:

import requests
import os

def chat_completion(messages, model="gpt-4"):
    response = requests.post(
        "https://api.scrub.health/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {os.environ.get('SCRUB_API_KEY')}",
            "Content-Type": "application/json"
        },
        json={
            "model": model,
            "messages": messages
        }
    )
    response.raise_for_status()
    return response.json()

# Usage
result = chat_completion([
    {"role": "user", "content": "What is preventive care?"}
])

print(result["choices"][0]["message"]["content"])
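
The endpoint accepts the usual generation parameters in the JSON body, just as the SDK examples do. A minimal sketch (hypothetical helper, adding a client-side timeout and optional parameters on top of the function above):

def chat_completion_with_options(messages, model="gpt-4", temperature=0.7, max_tokens=500):
    # Same endpoint as above, with generation parameters and a request timeout
    response = requests.post(
        "https://api.scrub.health/v1/chat/completions",
        headers={
            "Authorization": f"Bearer {os.environ.get('SCRUB_API_KEY')}",
            "Content-Type": "application/json"
        },
        json={
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens
        },
        timeout=60
    )
    response.raise_for_status()
    return response.json()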

Error Handling

from openai import OpenAI, APIError, APIConnectionError, RateLimitError
import os

client = OpenAI(
    api_key=os.environ.get("SCRUB_API_KEY"),
    base_url="https://api.scrub.health/v1"
)

try:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}]
    )
    print(response.choices[0].message.content)

except RateLimitError:
    print("Rate limit exceeded. Please wait and retry.")

except APIConnectionError:
    print("Failed to connect to Scrub API.")

except APIError as e:
    print(f"API error: {e.message}")
    # Check for PHI blocked error
    if "phi_blocked" in str(e):
        print("Request was blocked due to PHI detection.")

Async Usage

import asyncio
from openai import AsyncOpenAI
import os

client = AsyncOpenAI(
    api_key=os.environ.get("SCRUB_API_KEY"),
    base_url="https://api.scrub.health/v1"
)

async def main():
    response = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())
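
The async client also makes it easy to issue several requests concurrently. A minimal sketch using asyncio.gather with the client defined above:

async def ask(question):
    response = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": question}]
    )
    return response.choices[0].message.content

async def main_concurrent():
    questions = [
        "What is a balanced diet?",
        "How much sleep do adults need?",
    ]
    # Run the requests at the same time
    answers = await asyncio.gather(*(ask(q) for q in questions))
    for question, answer in zip(questions, answers):
        print(f"Q: {question}\nA: {answer}\n")

asyncio.run(main_concurrent())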

Complete Example

"""
Scrub API Python Example
Healthcare chatbot with PHI protection
"""

from openai import OpenAI
import os

def create_client():
    return OpenAI(
        api_key=os.environ.get("SCRUB_API_KEY"),
        base_url="https://api.scrub.health/v1"
    )

def chat(client, user_message, conversation_history=None):
    if conversation_history is None:
        conversation_history = []

    messages = [
        {"role": "system", "content": "You are a helpful healthcare assistant."}
    ] + conversation_history + [
        {"role": "user", "content": user_message}
    ]

    response = client.chat.completions.create(
        model="gpt-4",
        messages=messages,
        temperature=0.7
    )

    assistant_message = response.choices[0].message.content

    # Update conversation history
    conversation_history.append({"role": "user", "content": user_message})
    conversation_history.append({"role": "assistant", "content": assistant_message})

    return assistant_message, conversation_history

if __name__ == "__main__":
    client = create_client()
    history = []

    print("Healthcare Assistant (type 'quit' to exit)")
    print("-" * 40)

    while True:
        user_input = input("\nYou: ").strip()
        if user_input.lower() == 'quit':
            break

        response, history = chat(client, user_input, history)
        print(f"\nAssistant: {response}")