Fetching Deployed Prompts
After saving and deploying prompts in the Playground, fetch them in your application using the SDK.

Environments

Each project has three deployment environments:
| Environment | Purpose                    |
|-------------|----------------------------|
| dev         | Development and testing    |
| staging     | Pre-production validation  |
| prod        | Production deployment      |
Deploy prompts to environments in Studio > Prompts by selecting a saved prompt and choosing an environment.

Fetching and Using Prompts

import os
from openai import OpenAI
from honeyhive import HoneyHive

# Authenticate against HoneyHive with the HH_API_KEY environment variable
# (os.environ[...] raises KeyError if it is unset) and create an OpenAI
# client for running the fetched prompt.
client = HoneyHive(api_key=os.environ["HH_API_KEY"])
openai = OpenAI()

# Fetch all configurations and find the one you want.
# The SDK returns every configuration; filtering by name/env happens
# client-side on this snapshot.
configs = client.configurations.list()

def get_prompt(name: str, env: str = "prod", configurations=None):
    """Return the first configuration named *name* deployed to *env*.

    Args:
        name: Prompt/configuration name as saved in Studio.
        env: Deployment environment to match ("dev", "staging", or "prod").
        configurations: Optional iterable of configurations to search.
            Defaults to the module-level ``configs`` snapshot fetched at
            startup, so existing callers are unaffected.

    Returns:
        The matching configuration as a plain dict, or ``None`` when no
        configuration with that name is deployed to *env*.
    """
    if configurations is None:
        configurations = configs
    for c in configurations:
        # SDK objects expose model_dump(); plain dicts pass through as-is.
        d = c.model_dump() if hasattr(c, "model_dump") else c
        # "env" is a list of environments the prompt is deployed to.
        if d.get("name") == name and env in d.get("env", []):
            return d
    return None

# Look up the prod deployment and, if found, run it through OpenAI.
if prompt := get_prompt("my-prompt", "prod"):
    params = prompt["parameters"]
    # The saved template is an OpenAI-style message list; any stored
    # hyperparameters (temperature, max_tokens, ...) are forwarded as
    # keyword arguments.
    response = openai.chat.completions.create(
        model=params["model"],
        messages=params["template"],
        **params.get("hyperparameters", {}),
    )
The Python SDK returns all configurations. Filter client-side by name and env to get specific prompts.

Caching for Production

Cache prompts to reduce API calls:
from functools import lru_cache

@lru_cache(maxsize=100)
def get_prompt_cached(name: str, env: str = "prod"):
    """Fetch configurations and return the one named *name* deployed to *env*.

    Results are memoized per ``(name, env)`` pair to avoid repeated API
    calls; invoke ``get_prompt_cached.cache_clear()`` after updating
    prompts in Studio. Returns ``None`` when nothing matches (note that
    the ``None`` result is cached as well).
    """
    for raw in client.configurations.list():
        record = raw.model_dump() if hasattr(raw, "model_dump") else raw
        if record.get("name") == name and env in record.get("env", []):
            return record
    return None

# Clear cache when prompts are updated: get_prompt_cached.cache_clear()
Set a TTL on your cache to automatically refresh prompts when they're updated. Note that `functools.lru_cache` has no built-in TTL — schedule a periodic `get_prompt_cached.cache_clear()` or use a TTL-aware cache instead.

YAML Export

For static deployments or version control, export prompts as YAML:
import os
import yaml
from honeyhive import HoneyHive

client = HoneyHive(api_key=os.environ["HH_API_KEY"])
configs = client.configurations.list()

# Find the prod deployment of "my-prompt" and export it as YAML.
# Create the target directory first: open(..., "w") raises
# FileNotFoundError if "prompts/" does not exist.
os.makedirs("prompts", exist_ok=True)
for c in configs:
    d = c.model_dump() if hasattr(c, "model_dump") else c
    if d.get("name") == "my-prompt" and "prod" in d.get("env", []):
        with open("prompts/my-prompt.yaml", "w") as f:
            yaml.dump(d, f, default_flow_style=False)
        break
Load in your application:
import yaml

# Read the exported prompt back in and pull out the pieces the app needs.
# safe_load avoids executing arbitrary YAML tags from the file.
with open("prompts/my-prompt.yaml") as fh:
    config = yaml.safe_load(fh)

params = config["parameters"]
template, model = params["template"], params["model"]