# Types

All TypeScript types exported from `@agent-layer-zero/dendrite`.
## NeuronConfig

Configuration object for `createNeuron()`.
```ts
interface NeuronConfig {
  modelId: string
  worker?: Worker | (() => Worker)
  systemPrompt?: string
  personalityDocs?: PersonalityDoc[]
  temperature?: number // default: 0.2
  maxTokens?: number // default: 2048
  frequencyPenalty?: number // default: 0.5
  maxHistoryTurns?: number // default: 10
  useIndexedDBCache?: boolean // default: true

  // Inference selection
  inference?: 'auto' | 'fastest' | 'fallback' | 'api' // default: 'auto'
  engine?: 'webllm' | 'transformers' | 'api' // overrides inference

  // Callbacks
  onProgress?: (percent: number, text: string) => void
  onError?: (error: Error) => void
  onLoadingChange?: (loading: boolean) => void
  onGeneratingChange?: (generating: boolean) => void
  onInferenceSelected?: (tier: {
    engine: 'webllm' | 'transformers' | 'api'
    mode: 'webgpu' | 'wasm' | 'cloud'
    reason: string
  }) => void

  // API connection
  apiUrl?: string
  username?: string
  instanceSlug?: string
  apiToken?: string
}
```
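A minimal usage sketch, assuming `createNeuron()` is the package's factory export; the model ID, prompt text, and callback bodies below are placeholders, not values from the package:

```ts
import { createNeuron } from '@agent-layer-zero/dendrite'
import type { NeuronConfig } from '@agent-layer-zero/dendrite'

const config: NeuronConfig = {
  modelId: 'Llama-3.2-1B-Instruct', // placeholder: use a model ID your build supports
  systemPrompt: 'You are a concise assistant.',
  temperature: 0.3,
  inference: 'auto',
  onProgress: (percent, text) => console.log(`${percent}% ${text}`),
  onInferenceSelected: ({ engine, mode, reason }) =>
    console.log(`${engine}/${mode}: ${reason}`),
  onError: (error) => console.error(error),
}

const neuron = createNeuron(config)
```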
## Neuron

The instance returned by `createNeuron()`.
```ts
interface Neuron {
  send(message: string): AsyncGenerator<string>
  complete(message: string): Promise<string>
  stop(): void

  readonly isLoading: boolean
  readonly isGenerating: boolean
  readonly loadProgress: LoadProgress

  setModel(modelId: string): Promise<void>
  setPersonalityDocs(docs: PersonalityDoc[]): void
  setSystemPrompt(prompt: string): void

  // Sampler hot-swap (0.3.0+); no engine rebuild
  setTemperature(value: number): void
  setMaxTokens(value: number): void
  setFrequencyPenalty(value: number): void
  setMaxHistoryTurns(value: number): void

  getHistory(): Array<{ role: 'user' | 'assistant'; content: string }>
  setHistory(messages: Array<{ role: 'user' | 'assistant'; content: string }>): void
  clearHistory(): void

  destroy(): void
}
```
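An illustrative sketch of driving a Neuron instance; `declare` is used only so the snippet type-checks on its own, and the prompts are placeholders:

```ts
import type { Neuron } from '@agent-layer-zero/dendrite'

declare const neuron: Neuron // e.g. the instance from the NeuronConfig example above

async function chat(): Promise<void> {
  // Stream tokens as they arrive.
  let reply = ''
  for await (const chunk of neuron.send('Summarize WebGPU in one sentence.')) {
    reply += chunk
  }

  // Or wait for the full response in a single call.
  const followUp = await neuron.complete('Now in five words.')

  // Sampler settings hot-swap without rebuilding the engine (0.3.0+).
  neuron.setTemperature(0.7)
  neuron.setMaxTokens(512)

  console.log(reply, followUp, neuron.getHistory())

  // Release resources when the neuron is no longer needed.
  neuron.destroy()
}
```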
## PersonalityDoc

A document that shapes the LLM's behavior.
```ts
interface PersonalityDoc {
  type: 'zero-shot' | 'personality' | 'knowledge' | 'instructions'
  content: string
  name?: string // label for prompt headings
  enabled?: boolean // default: true
  order?: number // sort order, lower = first
}
```
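A sketch of a personality-doc set; the names and contents are illustrative only:

```ts
import type { Neuron, PersonalityDoc } from '@agent-layer-zero/dendrite'

declare const neuron: Neuron

const docs: PersonalityDoc[] = [
  { type: 'personality', name: 'Tone', content: 'Friendly, brief, avoids jargon.', order: 1 },
  { type: 'knowledge', name: 'Product facts', content: 'Runs models locally in the browser.', order: 2 },
  { type: 'instructions', content: 'Answer in at most three sentences.', enabled: true, order: 3 },
]

neuron.setPersonalityDocs(docs)
```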
## ChatMessage

A message in the LLM conversation.
```ts
interface ChatMessage {
  role: 'system' | 'user' | 'assistant'
  content: string
}
```
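A short sketch of how this shape relates to Neuron's history methods; note that `getHistory()` and `setHistory()` use only the 'user' and 'assistant' roles:

```ts
import type { ChatMessage, Neuron } from '@agent-layer-zero/dendrite'

declare const neuron: Neuron

// ChatMessage covers all three roles; the message texts are placeholders.
const conversation: ChatMessage[] = [
  { role: 'system', content: 'You are a concise assistant.' },
  { role: 'user', content: 'What is a tensor?' },
  { role: 'assistant', content: 'A multi-dimensional array of numbers.' },
]

// History entries are a subset of ChatMessage, so they are assignable to it.
const history: ChatMessage[] = neuron.getHistory()
```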
## LoadProgress

Model loading progress state.
```ts
interface LoadProgress {
  percent: number // 0-100
  text: string // status description
}
```
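A sketch of rendering load state from a neuron's `loadProgress` snapshot; the formatting helper is hypothetical:

```ts
import type { LoadProgress, Neuron } from '@agent-layer-zero/dendrite'

declare const neuron: Neuron

// Hypothetical helper: format a progress snapshot for a status line or progress bar.
function formatLoadProgress({ percent, text }: LoadProgress): string {
  return `${percent.toFixed(0)}% - ${text}`
}

if (neuron.isLoading) {
  console.log(formatLoadProgress(neuron.loadProgress))
}
```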
## ModelOption

Metadata for an available model.
```ts
interface ModelOption {
  id: string // WebLLM model ID
  label: string // human-readable name
  vram: string // estimated VRAM usage
  tier: string // 'Tiny' | 'Light' | 'Standard' | 'Heavy' | 'Reasoning'
}
```
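A sketch of driving a model picker with ModelOption values; the entries below are illustrative sample data, not the package's actual catalog:

```ts
import type { ModelOption, Neuron } from '@agent-layer-zero/dendrite'

declare const neuron: Neuron

// Illustrative entries only; consult the package for its real model list.
const models: ModelOption[] = [
  { id: 'Llama-3.2-1B-Instruct', label: 'Llama 3.2 1B', vram: '~1 GB', tier: 'Tiny' },
  { id: 'Llama-3.1-8B-Instruct', label: 'Llama 3.1 8B', vram: '~5 GB', tier: 'Standard' },
]

// Switch the running neuron to the user's selection.
async function selectModel(option: ModelOption): Promise<void> {
  await neuron.setModel(option.id)
}
```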