// Create a single knowledge entry.
// NOTE: top-level await — this snippet must run in an ES module context.
// `client` is the initialized NdxClient (see the setup snippet below).
const knowledge = await client.knowledge.create({
  title: 'Return Policy',
  // Multi-line policy text stored as the entry body.
  content: `
Returns are accepted within 30 days of product arrival.
Return conditions:
- Item must be unused and unopened
- Tags must still be attached
- Receipt or order confirmation email required
Return process:
1. Submit return request from My Page
2. Print the return shipping label
3. Package and ship the item
`,
  // Tags make the entry discoverable in search/filtering.
  tags: ['returns', 'policy', 'customer-support'],
});
console.log(`Created: ${knowledge.id}`);
// Get details of the most relevant result.
// `results` is the array returned by a prior knowledge search;
// guard against an empty result set before dereferencing.
if (results.length > 0) {
  const detail = await client.knowledge.get(results[0].id);
  console.log(detail.title);
  console.log(detail.content);

  // Check related knowledge linked to this entry.
  for (const connected of detail.connectedKnowledge) {
    console.log(`Related: ${connected.title}`);
  }
}
The Chat API is the recommended way to use your knowledge. It handles context retrieval, prompt construction, and LLM calls in one step.
The Chat API automatically handles context retrieval, prompt construction, and LLM calls internally. Just set `memory: { enabled: true }` and your knowledge-powered RAG pipeline is complete.
import { NdxClient } from '@neuradex/sdk';

// Credentials are read from the environment — never hard-code API keys.
const client = new NdxClient({
  apiKey: process.env.NEURADEX_API_KEY,
  projectId: process.env.NEURADEX_PROJECT_ID,
});

// RAG in one step — SDK handles memory retrieval and prompt construction
const stream = client.chat.create({
  model: 'gpt-4o',
  messages: [
    {
      role: 'system',
      content: 'You are a customer support assistant.',
    },
    {
      role: 'user',
      content: 'How do I return a product?',
    },
  ],
  // Enable knowledge-backed context injection (capped at 4000 tokens,
  // including episodic memory).
  memory: { enabled: true, maxTokens: 4000, includeEpisodes: true },
});

// Stream responses in real-time
// NOTE(review): `chat.create` is consumed without `await` — presumably it
// returns a stream handle synchronously; confirm against the SDK docs.
for await (const chunk of stream.textStream) {
  process.stdout.write(chunk);
}