import os

from openai import OpenAI

from honeyhive import HoneyHive

client = HoneyHive(api_key=os.environ["HH_API_KEY"])
openai = OpenAI()

# Fetch all configurations once at import time. The SDK has no server-side
# name/env filter, so get_prompt() filters this list client-side.
configs = client.configurations.list()


def get_prompt(name: str, env: str = "prod", configs_list=None):
    """Return the configuration dict named *name* deployed to *env*, or None.

    Args:
        name: Configuration ("prompt") name to look up.
        env: Deployment environment. Each configuration carries an ``env``
            field that is a *list* of environments it is deployed to.
        configs_list: Optional iterable of configurations to search.
            Defaults to the module-level ``configs`` fetched above; pass an
            explicit list to avoid relying on module state (e.g. in tests).
    """
    source = configs if configs_list is None else configs_list
    for c in source:
        # SDK objects expose model_dump(); fall back to plain dicts.
        d = c.model_dump() if hasattr(c, "model_dump") else c
        # env is an array of deployed environments
        if d.get("name") == name and env in d.get("env", []):
            return d
    return None


# Use the prompt
prompt = get_prompt("my-prompt", "prod")
if prompt:
    params = prompt["parameters"]
    response = openai.chat.completions.create(
        model=params["model"],
        messages=params["template"],
        **params.get("hyperparameters", {}),
    )
The Python SDK's `configurations.list()` returns every configuration in the project; filter client-side by `name` and `env` to retrieve a specific deployed prompt.
import time
from functools import lru_cache

# How long a cached prompt stays fresh. Without a TTL, an lru_cache entry
# lives forever and updated prompts are never picked up.
_TTL_SECONDS = 300


def _ttl_bucket(seconds: int = _TTL_SECONDS) -> int:
    """Coarse time bucket used as an extra cache key so entries expire.

    The bucket value changes every *seconds* seconds; since it is part of
    the cache key, stale entries are simply never hit again and eventually
    fall out of the LRU.
    """
    return int(time.time() // seconds)


@lru_cache(maxsize=100)
def _get_prompt_cached(name: str, env: str, ttl_bucket: int):
    """Fetch and filter configurations; cached per (name, env, time bucket)."""
    configs = client.configurations.list()
    for c in configs:
        d = c.model_dump() if hasattr(c, "model_dump") else c
        # env is an array of deployed environments
        if d.get("name") == name and env in d.get("env", []):
            return d
    return None


def get_prompt_cached(name: str, env: str = "prod"):
    """Return the prompt named *name* deployed to *env*, cached with a TTL.

    Results refresh automatically every ``_TTL_SECONDS`` seconds, so
    prompt updates are picked up without a restart.
    """
    return _get_prompt_cached(name, env, _ttl_bucket())


# Preserve the manual-invalidation hook:
# clear the cache immediately when prompts are updated with
# get_prompt_cached.cache_clear()
get_prompt_cached.cache_clear = _get_prompt_cached.cache_clear
Set a TTL on your cache to automatically refresh prompts when they’re updated.
For static deployments or version control, export prompts as YAML:
import os

import yaml

from honeyhive import HoneyHive

client = HoneyHive(api_key=os.environ["HH_API_KEY"])
configs = client.configurations.list()

# Find and export a prompt
for c in configs:
    d = c.model_dump() if hasattr(c, "model_dump") else c
    if d.get("name") == "my-prompt" and "prod" in d.get("env", []):
        # Ensure the output directory exists — open() does not create
        # intermediate directories and would raise FileNotFoundError.
        os.makedirs("prompts", exist_ok=True)
        with open("prompts/my-prompt.yaml", "w") as f:
            yaml.dump(d, f, default_flow_style=False)
        break