Quickstart
Upload a document, search it, and use the results in your AI application -- all in 5 minutes.
Base URL: https://api.inherent.systems
Auth: Every request requires an X-API-Key header.
Step 1: Get Your API Key
- Sign in to the Inherent Dashboard.
- Go to Settings > API Keys.
- Click Create API Key and copy the key.
API keys start with ink_. Store yours in an environment variable:
export INHERENT_API_KEY="ink_your_api_key"
Step 2: Upload a Document
Upload a file to your knowledge base. Inherent handles parsing, chunking, and embedding automatically.
- cURL
- Python
- JavaScript
curl -X POST https://api.inherent.systems/api/v1/documents \
  -H "X-API-Key: $INHERENT_API_KEY" \
  -F "file=@./my-document.pdf"
import requests

API_KEY = "ink_your_api_key"
BASE_URL = "https://api.inherent.systems/api/v1"

with open("my-document.pdf", "rb") as f:
    resp = requests.post(
        f"{BASE_URL}/documents",
        headers={"X-API-Key": API_KEY},
        files={"file": ("my-document.pdf", f, "application/pdf")},
    )
print(resp.json())
import fs from 'node:fs';

const API_KEY = 'ink_your_api_key';
const BASE_URL = 'https://api.inherent.systems/api/v1';

// Native fetch requires a Blob/File in FormData, not a read stream.
const formData = new FormData();
formData.append(
  'file',
  new Blob([fs.readFileSync('./my-document.pdf')], { type: 'application/pdf' }),
  'my-document.pdf',
);

const resp = await fetch(`${BASE_URL}/documents`, {
  method: 'POST',
  headers: { 'X-API-Key': API_KEY },
  body: formData,
});
console.log(await resp.json());
Response:
{
  "document_id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
  "name": "my-document.pdf",
  "workspace_id": "ws_abc123",
  "mime_type": "application/pdf",
  "size_bytes": 245760,
  "status": "pending",
  "message": "Document uploaded successfully. Processing will begin shortly."
}
Documents are processed asynchronously. The upload returns immediately with status: "pending". Processing typically takes a few seconds -- check the status before searching.
Step 3: Check Document Status
Poll the documents endpoint until your document shows status: "processed".
- cURL
- Python
- JavaScript
curl https://api.inherent.systems/api/v1/documents \
  -H "X-API-Key: $INHERENT_API_KEY"
resp = requests.get(
    f"{BASE_URL}/documents",
    headers={"X-API-Key": API_KEY},
)
for doc in resp.json()["documents"]:
    print(f"{doc['name']} -- {doc['status']}")
const resp = await fetch(`${BASE_URL}/documents`, {
  headers: { 'X-API-Key': API_KEY },
});
const data = await resp.json();
data.documents.forEach(doc => {
  console.log(`${doc.name} -- ${doc.status}`);
});
Response:
{
  "documents": [
    {
      "id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
      "name": "my-document.pdf",
      "workspace_id": "ws_abc123",
      "source_type": "upload",
      "mime_type": "application/pdf",
      "size_bytes": 245760,
      "chunk_count": 42,
      "status": "processed",
      "created_at": "2026-04-03T10:30:00Z",
      "updated_at": "2026-04-03T10:30:05Z",
      "metadata": null
    }
  ],
  "total": 1,
  "page": 1,
  "page_size": 20
}
When status is "processed", the document is searchable.
Step 4: Search Your Knowledge Base
Run a semantic search query to retrieve relevant chunks.
- cURL
- Python
- JavaScript
curl -X POST https://api.inherent.systems/api/v1/search \
  -H "X-API-Key: $INHERENT_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "query": "What does this document cover?",
    "limit": 5
  }'
resp = requests.post(
    f"{BASE_URL}/search",
    headers={"X-API-Key": API_KEY},
    json={"query": "What does this document cover?", "limit": 5},
)
for result in resp.json()["results"]:
    print(f"[{result['score']:.3f}] {result['content'][:120]}...")
const resp = await fetch(`${BASE_URL}/search`, {
  method: 'POST',
  headers: {
    'X-API-Key': API_KEY,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    query: 'What does this document cover?',
    limit: 5,
  }),
});
const data = await resp.json();
data.results.forEach(r => {
  console.log(`[${r.score.toFixed(3)}] ${r.content.slice(0, 120)}...`);
});
Response:
{
  "results": [
    {
      "chunk_id": "chunk_xyz789",
      "document_id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
      "document_name": "my-document.pdf",
      "content": "This document provides an overview of the system architecture, including the ingestion pipeline, search infrastructure, and audit logging.",
      "score": 0.94,
      "metadata": null
    },
    {
      "chunk_id": "chunk_abc456",
      "document_id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
      "document_name": "my-document.pdf",
      "content": "The platform supports PDF, Markdown, DOCX, and plain text formats. Documents are automatically chunked and indexed for semantic retrieval.",
      "score": 0.87,
      "metadata": null
    }
  ],
  "query": "What does this document cover?",
  "total_results": 2,
  "processing_time_ms": 38.7
}
Step 5: Use with Your LLM
Feed the retrieved context into your LLM to get grounded answers.
- OpenAI
- Anthropic
import requests
from openai import OpenAI

API_KEY = "ink_your_api_key"
BASE_URL = "https://api.inherent.systems/api/v1"

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def ask(question: str) -> str:
    # 1. Retrieve context from Inherent
    search_resp = requests.post(
        f"{BASE_URL}/search",
        headers={"X-API-Key": API_KEY},
        json={"query": question, "limit": 5},
    )
    chunks = search_resp.json()["results"]
    context = "\n\n".join(c["content"] for c in chunks)

    # 2. Send to OpenAI with context
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {
                "role": "system",
                "content": (
                    "Answer the user's question using only the provided context. "
                    "If the context doesn't contain the answer, say so.\n\n"
                    f"Context:\n{context}"
                ),
            },
            {"role": "user", "content": question},
        ],
    )
    return completion.choices[0].message.content

answer = ask("What file formats are supported?")
print(answer)
import anthropic
import requests

API_KEY = "ink_your_api_key"
BASE_URL = "https://api.inherent.systems/api/v1"

def ask(question: str) -> str:
    # 1. Retrieve context from Inherent
    search_resp = requests.post(
        f"{BASE_URL}/search",
        headers={"X-API-Key": API_KEY},
        json={"query": question, "limit": 5},
    )
    chunks = search_resp.json()["results"]
    context = "\n\n".join(c["content"] for c in chunks)

    # 2. Send to Claude with context
    client = anthropic.Anthropic()
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=1024,
        system=(
            "Answer the user's question using only the provided context. "
            "If the context doesn't contain the answer, say so.\n\n"
            f"Context:\n{context}"
        ),
        messages=[{"role": "user", "content": question}],
    )
    return message.content[0].text

answer = ask("What file formats are supported?")
print(answer)
For production, add error handling and consider caching frequent queries. The processing_time_ms field in search responses helps you monitor latency.
Next Steps
- Authentication Guide -- API key management and security best practices
- Uploading Documents -- Supported file types, metadata, and processing details
- Searching Your Knowledge Base -- Search filters, scoring, and tuning
- Retrieving Context -- Chunks, full context, and LLM prompt building
- API Reference -- Complete endpoint documentation