Documentation ¶
Overview ¶
Package modules provides the Parallel execution wrapper for DSPy-Go.
The Parallel module enables concurrent execution of any DSPy module across multiple inputs, providing significant performance improvements for batch processing.
Example usage:
    predict := modules.NewPredict(signature)
    parallel := modules.NewParallel(predict,
        modules.WithMaxWorkers(4),
        modules.WithReturnFailures(true))

    batchInputs := []map[string]interface{}{
        {"input": "first example"},
        {"input": "second example"},
        {"input": "third example"},
    }

    result, err := parallel.Process(ctx, map[string]interface{}{
        "batch_inputs": batchInputs,
    })

    results := result["results"].([]map[string]interface{})
The parallel module automatically manages worker pools, error handling, and result collection while maintaining the order of inputs in outputs.
Index ¶
- func ProcessTyped[TInput, TOutput any](ctx context.Context, predict *Predict, inputs TInput, opts ...core.Option) (TOutput, error)
- func ProcessTypedWithValidation[TInput, TOutput any](ctx context.Context, predict *Predict, inputs TInput, opts ...core.Option) (TOutput, error)
- type ChainOfThought
- func (c *ChainOfThought) ClearInterceptors()
- func (c *ChainOfThought) Clone() core.Module
- func (c *ChainOfThought) Compose(next core.Module) core.Module
- func (c *ChainOfThought) GetDisplayName() string
- func (c *ChainOfThought) GetInterceptors() []core.ModuleInterceptor
- func (c *ChainOfThought) GetModuleType() string
- func (c *ChainOfThought) GetSignature() core.Signature
- func (c *ChainOfThought) GetSubModules() []core.Module
- func (c *ChainOfThought) Process(ctx context.Context, inputs map[string]any, opts ...core.Option) (map[string]any, error)
- func (c *ChainOfThought) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, ...) (map[string]any, error)
- func (c *ChainOfThought) SetInterceptors(interceptors []core.ModuleInterceptor)
- func (c *ChainOfThought) SetLLM(llm core.LLM)
- func (c *ChainOfThought) SetSignature(signature core.Signature)
- func (c *ChainOfThought) SetSubModules(modules []core.Module)
- func (c *ChainOfThought) WithDefaultOptions(opts ...core.Option) *ChainOfThought
- func (c *ChainOfThought) WithName(name string) *ChainOfThought
- func (c *ChainOfThought) WithStructuredOutput() *ChainOfThought
- func (c *ChainOfThought) WithStructuredOutputConfig(config interceptors.ChainOfThoughtStructuredConfig) *ChainOfThought
- type Module
- type MultiChainComparison
- func (m *MultiChainComparison) Clone() core.Module
- func (m *MultiChainComparison) GetSignature() core.Signature
- func (m *MultiChainComparison) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
- func (m *MultiChainComparison) SetLLM(llm core.LLM)
- func (m *MultiChainComparison) WithName(name string) *MultiChainComparison
- type OfferFeedback
- type Parallel
- func (p *Parallel) Clone() core.Module
- func (p *Parallel) GetInnerModule() core.Module
- func (p *Parallel) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
- func (p *Parallel) SetLLM(llm core.LLM)
- func (p *Parallel) WithName(name string) *Parallel
- type ParallelOption
- type ParallelOptions
- type ParallelResult
- type Predict
- func (p *Predict) Clone() core.Module
- func (p *Predict) GetDemos() []core.Example
- func (p *Predict) GetLLMIdentifier() map[string]string
- func (p *Predict) GetSignature() core.Signature
- func (p *Predict) GetXMLConfig() *interceptors.XMLConfig
- func (p *Predict) IsXMLModeEnabled() bool
- func (p *Predict) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
- func (p *Predict) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, ...) (map[string]any, error)
- func (p *Predict) SetDemos(demos []core.Example)
- func (p *Predict) SetLLM(llm core.LLM)
- func (p *Predict) WithDefaultOptions(opts ...core.Option) *Predict
- func (p *Predict) WithName(name string) *Predict
- func (p *Predict) WithStructuredOutput() *Predict
- func (p *Predict) WithStructuredOutputConfig(config interceptors.StructuredOutputConfig) *Predict
- func (p *Predict) WithTextOutput() *Predict
- func (p *Predict) WithXMLOutput(config interceptors.XMLConfig) *Predict
- type ReAct
- func (r *ReAct) Clone() core.Module
- func (r *ReAct) Process(ctx context.Context, inputs map[string]any, opts ...core.Option) (map[string]any, error)
- func (r *ReAct) SetLLM(llm core.LLM)
- func (r *ReAct) WithDefaultOptions(opts ...core.Option) *ReAct
- func (r *ReAct) WithNativeFunctionCalling() *ReAct
- func (r *ReAct) WithNativeFunctionCallingConfig(config interceptors.FunctionCallingConfig) *ReAct
- func (r *ReAct) WithXMLParsing(config interceptors.XMLConfig) *ReAct
- type Refine
- func (r *Refine) Clone() core.Module
- func (r *Refine) GetConfig() RefineConfig
- func (r *Refine) GetSignature() core.Signature
- func (r *Refine) GetWrappedModule() core.Module
- func (r *Refine) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
- func (r *Refine) SetLLM(llm core.LLM)
- func (r *Refine) SetSignature(signature core.Signature)
- func (r *Refine) UpdateConfig(config RefineConfig) *Refine
- func (r *Refine) WithDefaultOptions(opts ...core.Option) *Refine
- func (r *Refine) WithName(name string) *Refine
- type RefineConfig
- type RewardFunction
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func ProcessTyped ¶
func ProcessTyped[TInput, TOutput any](ctx context.Context, predict *Predict, inputs TInput, opts ...core.Option) (TOutput, error)
func ProcessTypedWithValidation ¶
func ProcessTypedWithValidation[TInput, TOutput any](ctx context.Context, predict *Predict, inputs TInput, opts ...core.Option) (TOutput, error)
Types ¶
type ChainOfThought ¶
type ChainOfThought struct {
Predict *Predict
}
func NewChainOfThought ¶
func NewChainOfThought(signature core.Signature) *ChainOfThought
func NewTypedChainOfThought ¶ added in v0.61.0
func NewTypedChainOfThought[TInput, TOutput any]() *ChainOfThought
NewTypedChainOfThought creates a new type-safe ChainOfThought module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
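For illustration, a minimal sketch of the typed constructor. The QAInput/QAOutput structs, the way their fields map onto signature field names, and the lowercase input/output keys below are assumptions, not taken from this page:

    // Hypothetical typed input/output; the field-to-signature mapping is assumed.
    type QAInput struct {
        Question string
    }
    type QAOutput struct {
        Answer string
    }

    cot := modules.NewTypedChainOfThought[QAInput, QAOutput]()
    cot.SetLLM(llm) // llm is any configured core.LLM

    // The module still satisfies core.Module, so the map-based Process works.
    out, err := cot.Process(ctx, map[string]any{"question": "What is 2 + 2?"})
    if err != nil {
        // handle error
    }
    _ = out["rationale"] // ChainOfThought adds a rationale field to the signature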
func (*ChainOfThought) ClearInterceptors ¶ added in v0.49.0
func (c *ChainOfThought) ClearInterceptors()
ClearInterceptors removes all interceptors from this module.
func (*ChainOfThought) Clone ¶
func (c *ChainOfThought) Clone() core.Module
Clone creates a deep copy of the ChainOfThought module.
func (*ChainOfThought) Compose ¶ added in v0.1.0
func (c *ChainOfThought) Compose(next core.Module) core.Module
Compose creates a new module that chains this module with the next module.
func (*ChainOfThought) GetDisplayName ¶ added in v0.36.0
func (c *ChainOfThought) GetDisplayName() string
GetDisplayName returns the display name for this ChainOfThought module.
func (*ChainOfThought) GetInterceptors ¶ added in v0.49.0
func (c *ChainOfThought) GetInterceptors() []core.ModuleInterceptor
GetInterceptors returns the current interceptors for this module.
func (*ChainOfThought) GetModuleType ¶ added in v0.36.0
func (c *ChainOfThought) GetModuleType() string
GetModuleType returns "ChainOfThought".
func (*ChainOfThought) GetSignature ¶
func (c *ChainOfThought) GetSignature() core.Signature
GetSignature returns the signature from the internal Predict module.
func (*ChainOfThought) GetSubModules ¶ added in v0.1.0
func (c *ChainOfThought) GetSubModules() []core.Module
GetSubModules returns the sub-modules of this ChainOfThought.
func (*ChainOfThought) ProcessWithInterceptors ¶ added in v0.49.0
func (c *ChainOfThought) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, interceptors []core.ModuleInterceptor, opts ...core.Option) (map[string]any, error)
ProcessWithInterceptors executes the module's logic with interceptor support.
func (*ChainOfThought) SetInterceptors ¶ added in v0.49.0
func (c *ChainOfThought) SetInterceptors(interceptors []core.ModuleInterceptor)
SetInterceptors sets the default interceptors for this module instance.
func (*ChainOfThought) SetLLM ¶
func (c *ChainOfThought) SetLLM(llm core.LLM)
SetLLM sets the LLM on the internal Predict module.
func (*ChainOfThought) SetSignature ¶ added in v0.28.0
func (c *ChainOfThought) SetSignature(signature core.Signature)
SetSignature sets the signature on the internal Predict module with rationale field.
func (*ChainOfThought) SetSubModules ¶ added in v0.1.0
func (c *ChainOfThought) SetSubModules(modules []core.Module)
SetSubModules sets the sub-modules (expects exactly one Predict module).
func (*ChainOfThought) WithDefaultOptions ¶ added in v0.17.1
func (c *ChainOfThought) WithDefaultOptions(opts ...core.Option) *ChainOfThought
WithDefaultOptions sets default options by configuring the underlying Predict module.
func (*ChainOfThought) WithName ¶ added in v0.36.0
func (c *ChainOfThought) WithName(name string) *ChainOfThought
WithName sets a semantic name for this ChainOfThought instance.
func (*ChainOfThought) WithStructuredOutput ¶ added in v0.70.0
func (c *ChainOfThought) WithStructuredOutput() *ChainOfThought
WithStructuredOutput enables native JSON structured output for ChainOfThought. This uses the LLM's GenerateWithJSON capability to produce structured responses that include both reasoning and output fields, eliminating parsing errors.
The output will include a "reasoning" field containing the step-by-step thought process.
Usage:
    cot := modules.NewChainOfThought(signature).WithStructuredOutput()
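For example, continuing from the usage line above; the "question" and "answer" keys are placeholders for whatever fields the signature actually defines:

    cot.SetLLM(llm) // an LLM that supports CapabilityJSON

    out, err := cot.Process(ctx, map[string]any{"question": "Why is the sky blue?"})
    if err != nil {
        // handle error
    }
    fmt.Println(out["reasoning"]) // step-by-step thought process
    fmt.Println(out["answer"])    // placeholder: the signature's own output field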
func (*ChainOfThought) WithStructuredOutputConfig ¶ added in v0.70.0
func (c *ChainOfThought) WithStructuredOutputConfig(config interceptors.ChainOfThoughtStructuredConfig) *ChainOfThought
WithStructuredOutputConfig enables structured output with custom CoT configuration.
type MultiChainComparison ¶ added in v0.30.0
type MultiChainComparison struct {
core.BaseModule
M int // Number of attempts to compare
// contains filtered or unexported fields
}
MultiChainComparison implements the multi-chain comparison module that compares multiple reasoning attempts and produces a holistic evaluation.
func NewMultiChainComparison ¶ added in v0.30.0
func NewMultiChainComparison(signature core.Signature, M int, temperature float64, opts ...core.Option) *MultiChainComparison
NewMultiChainComparison creates a new MultiChainComparison module.
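A construction sketch; M=3 and temperature 0.7 are arbitrary, and the "completions" input key below is a hypothetical placeholder, since this page does not spell out how the candidate attempts are passed to Process:

    // Compare three reasoning attempts and produce a holistic evaluation.
    mcc := modules.NewMultiChainComparison(signature, 3, 0.7)
    mcc.SetLLM(llm)

    out, err := mcc.Process(ctx, map[string]interface{}{
        "completions": completions, // hypothetical key holding the M candidate attempts
    })
    if err != nil {
        // handle error
    }
    _ = out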
func NewTypedMultiChainComparison ¶ added in v0.61.0
func NewTypedMultiChainComparison[TInput, TOutput any](M int, temperature float64, opts ...core.Option) *MultiChainComparison
NewTypedMultiChainComparison creates a new type-safe MultiChainComparison module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
func (*MultiChainComparison) Clone ¶ added in v0.30.0
func (m *MultiChainComparison) Clone() core.Module
Clone creates a deep copy of the MultiChainComparison module.
func (*MultiChainComparison) GetSignature ¶ added in v0.30.0
func (m *MultiChainComparison) GetSignature() core.Signature
GetSignature returns the signature of the module.
func (*MultiChainComparison) Process ¶ added in v0.30.0
func (m *MultiChainComparison) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
Process implements the core.Module interface. It takes completions and processes them into reasoning attempts for comparison.
func (*MultiChainComparison) SetLLM ¶ added in v0.30.0
func (m *MultiChainComparison) SetLLM(llm core.LLM)
SetLLM sets the LLM for the internal predict module.
func (*MultiChainComparison) WithName ¶ added in v0.36.0
func (m *MultiChainComparison) WithName(name string) *MultiChainComparison
WithName sets a semantic name for this MultiChainComparison instance.
type OfferFeedback ¶ added in v0.30.0
type OfferFeedback struct {
core.BaseModule
// contains filtered or unexported fields
}
OfferFeedback represents a module for generating advice to improve module performance. This is a simplified version of the Python implementation's OfferFeedback signature.
func NewOfferFeedback ¶ added in v0.30.0
func NewOfferFeedback() *OfferFeedback
NewOfferFeedback creates a new OfferFeedback module.
func (*OfferFeedback) Clone ¶ added in v0.30.0
func (of *OfferFeedback) Clone() core.Module
Clone creates a deep copy of the OfferFeedback module.
func (*OfferFeedback) Process ¶ added in v0.30.0
func (of *OfferFeedback) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
Process generates feedback and advice for improving module performance.
func (*OfferFeedback) SetLLM ¶ added in v0.30.0
func (of *OfferFeedback) SetLLM(llm core.LLM)
SetLLM sets the language model for feedback generation.
type Parallel ¶ added in v0.30.0
type Parallel struct {
core.BaseModule
// contains filtered or unexported fields
}
Parallel executes a module against multiple inputs concurrently.
func NewParallel ¶ added in v0.30.0
func NewParallel(module core.Module, opts ...ParallelOption) *Parallel
NewParallel creates a new parallel execution wrapper around a module.
func NewTypedParallel ¶ added in v0.61.0
func NewTypedParallel[TInput, TOutput any](innerModule core.Module, opts ...ParallelOption) *Parallel
NewTypedParallel creates a new type-safe Parallel module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
func (*Parallel) GetInnerModule ¶ added in v0.30.0
func (p *Parallel) GetInnerModule() core.Module
GetInnerModule returns the wrapped module.
func (*Parallel) Process ¶ added in v0.30.0
func (p *Parallel) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
Process executes the inner module against multiple inputs in parallel.
type ParallelOption ¶ added in v0.30.0
type ParallelOption func(*ParallelOptions)
ParallelOption is a function that configures ParallelOptions.
func WithMaxWorkers ¶ added in v0.30.0
func WithMaxWorkers(count int) ParallelOption
WithMaxWorkers sets the maximum number of concurrent workers.
func WithReturnFailures ¶ added in v0.30.0
func WithReturnFailures(returnFailures bool) ParallelOption
WithReturnFailures configures whether to return failed results.
func WithStopOnFirstError ¶ added in v0.30.0
func WithStopOnFirstError(stopOnError bool) ParallelOption
WithStopOnFirstError configures whether to stop on first error.
type ParallelOptions ¶ added in v0.30.0
type ParallelOptions struct {
// MaxWorkers sets the maximum number of concurrent workers.
// If 0, defaults to 100 workers (optimized for I/O-bound remote API calls).
// For local models (CPU-bound), set to runtime.NumCPU() for optimal performance.
MaxWorkers int
// ReturnFailures determines if failed results should be included in output
ReturnFailures bool
// StopOnFirstError stops execution on first error encountered
StopOnFirstError bool
}
ParallelOptions configures parallel execution behavior.
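For example, the default suits remote API calls, while a local CPU-bound model can be capped at the machine's core count, as the field comments above suggest:

    // Remote LLM APIs (I/O-bound): the default of 100 workers is usually fine.
    remote := modules.NewParallel(predict)

    // Local model (CPU-bound): bound concurrency to the core count (requires
    // importing "runtime") and stop the batch as soon as one item fails.
    local := modules.NewParallel(predict,
        modules.WithMaxWorkers(runtime.NumCPU()),
        modules.WithStopOnFirstError(true),
    )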
type ParallelResult ¶ added in v0.30.0
type ParallelResult struct {
Index int // Original index in the input batch
Success bool // Whether execution succeeded
Output map[string]interface{} // The actual output
Error error // Error if execution failed
}
ParallelResult contains the result of a parallel execution.
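As an illustration of the fields, iterating a slice of ParallelResult values might look like the sketch below; how such a slice is obtained from a run (versus the plain "results" maps shown in the overview) is an assumption here:

    // parallelResults is assumed to be a []modules.ParallelResult.
    for _, r := range parallelResults {
        if !r.Success {
            log.Printf("input %d failed: %v", r.Index, r.Error)
            continue
        }
        fmt.Println(r.Index, r.Output) // Output preserves the inner module's result map
    }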
type Predict ¶
type Predict struct {
core.BaseModule
Demos []core.Example
LLM core.LLM
// contains filtered or unexported fields
}
func NewPredict ¶
func NewTypedPredict ¶ added in v0.52.0
NewTypedPredict creates a new type-safe Predict module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
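Combining this with the package-level ProcessTyped function from the index might look like the sketch below; the exact form of NewTypedPredict (assumed here to mirror the other typed constructors) and the struct-to-field mapping are assumptions:

    type SummarizeInput struct {
        Document string
    }
    type SummarizeOutput struct {
        Summary string
    }

    // Assumed constructor shape, mirroring NewTypedChainOfThought.
    p := modules.NewTypedPredict[SummarizeInput, SummarizeOutput]()
    p.SetLLM(llm)

    // ProcessTyped's signature is taken from the package index above.
    out, err := modules.ProcessTyped[SummarizeInput, SummarizeOutput](ctx, p, SummarizeInput{
        Document: "long text to summarize",
    })
    if err != nil {
        // handle error
    }
    fmt.Println(out.Summary)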
func (*Predict) GetLLMIdentifier ¶ added in v0.27.0
func (p *Predict) GetLLMIdentifier() map[string]string
GetLLMIdentifier implements the LMConfigProvider interface.
func (*Predict) GetSignature ¶
func (p *Predict) GetSignature() core.Signature
func (*Predict) GetXMLConfig ¶ added in v0.54.0
func (p *Predict) GetXMLConfig() *interceptors.XMLConfig
GetXMLConfig returns the XML configuration if XML mode is enabled.
func (*Predict) IsXMLModeEnabled ¶ added in v0.54.0
func (p *Predict) IsXMLModeEnabled() bool
IsXMLModeEnabled returns true if XML mode is enabled for this module.
func (*Predict) ProcessWithInterceptors ¶ added in v0.49.0
func (p *Predict) ProcessWithInterceptors(ctx context.Context, inputs map[string]any, interceptors []core.ModuleInterceptor, opts ...core.Option) (map[string]any, error)
ProcessWithInterceptors executes the Predict module's logic with interceptor support.
func (*Predict) WithDefaultOptions ¶ added in v0.17.1
func (p *Predict) WithDefaultOptions(opts ...core.Option) *Predict
func (*Predict) WithStructuredOutput ¶ added in v0.70.0
func (p *Predict) WithStructuredOutput() *Predict
WithStructuredOutput enables native JSON structured output instead of text parsing. This uses the LLM's GenerateWithJSON capability to produce structured responses that map directly to the signature's output fields, eliminating parsing errors.
Benefits:
- No parsing errors from malformed prefixes
- Strongly typed output from the LLM
- Works with any signature without custom prefix configuration
- More reliable extraction of multiple output fields
Requirements:
- The LLM must support CapabilityJSON
- Falls back to text-based parsing if not supported
Usage:
    predict := modules.NewPredict(signature).WithStructuredOutput()
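Continuing from the usage line above; the "question", "answer", and "confidence" keys are placeholders for the signature's actual fields:

    predict.SetLLM(llm) // falls back to text-based parsing if the LLM lacks CapabilityJSON

    out, err := predict.Process(ctx, map[string]interface{}{"question": "..."})
    if err != nil {
        // handle error
    }
    // Each output field of the signature arrives under its own key.
    answer, _ := out["answer"].(string)
    confidence, _ := out["confidence"].(string)
    fmt.Println(answer, confidence)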
func (*Predict) WithStructuredOutputConfig ¶ added in v0.70.0
func (p *Predict) WithStructuredOutputConfig(config interceptors.StructuredOutputConfig) *Predict
WithStructuredOutputConfig enables structured output with custom configuration.
func (*Predict) WithTextOutput ¶ added in v0.54.0
func (p *Predict) WithTextOutput() *Predict
WithTextOutput disables XML output and uses traditional text-based parsing. This is an escape hatch for users who prefer the original behavior.
IMPORTANT: This method currently removes ALL interceptors from the module, not just XML-related ones. This means any custom interceptors you've configured (such as logging, caching, or metrics) will also be removed.
TODO(#interceptor-preservation): Implement selective removal of only XML interceptors. This requires an interceptor identification mechanism since interceptors are function types. Possible solutions:
- Wrap interceptors in a struct with metadata
- Maintain a separate list of XML interceptor indices
- Use a registry pattern for interceptor management
Until this is fixed, if you need to preserve custom interceptors:
- Save your interceptors before calling WithTextOutput()
- Re-add them after calling WithTextOutput()
Example workaround:
    customInterceptors := predict.GetInterceptors()[:2] // Save first 2 custom interceptors
    predict.WithTextOutput()
    predict.SetInterceptors(customInterceptors) // Re-add them
func (*Predict) WithXMLOutput ¶ added in v0.54.0
func (p *Predict) WithXMLOutput(config interceptors.XMLConfig) *Predict
WithXMLOutput enables XML interceptor-based output formatting. This provides structured XML output with validation, security features, and error handling.
type ReAct ¶
type ReAct struct {
core.BaseModule
Predict *Predict
Extract *ChainOfThought // Fallback extraction module for when loop ends without Finish
Registry *tools.InMemoryToolRegistry
MaxIters int
XMLConfig *interceptors.XMLConfig // Optional XML config for enhanced parsing
}
ReAct implements the ReAct agent loop (Reason, Action, Observation). It uses a Predict module to generate thoughts and actions, and executes tools. If the loop ends without a "Finish" action (e.g., max iterations reached), a fallback Extract module attempts to produce an answer from the gathered trajectory.
func NewReAct ¶
NewReAct creates a new ReAct module. It takes a signature (which it modifies), a tool registry pointer, and max iterations.
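A usage sketch; how the *tools.InMemoryToolRegistry is built and populated is outside this page and assumed to have happened already, and the "question" input key is a placeholder for the signature's input field:

    // registry is a *tools.InMemoryToolRegistry already populated with the
    // tools the agent is allowed to call.
    react := modules.NewReAct(signature, registry, 5) // at most 5 Reason/Act/Observe iterations
    react.SetLLM(llm)

    out, err := react.Process(ctx, map[string]any{"question": "What is the weather in Oslo?"})
    if err != nil {
        // handle error
    }
    fmt.Println(out) // if the loop hit max iterations, the fallback Extract module fills in the answer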
func NewTypedReAct ¶ added in v0.61.0
func NewTypedReAct[TInput, TOutput any](registry *tools.InMemoryToolRegistry, maxIters int) *ReAct
NewTypedReAct creates a new type-safe ReAct module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
func (*ReAct) Clone ¶
func (r *ReAct) Clone() core.Module
Clone creates a copy of the ReAct module. Note: Predict and Extract modules are cloned, but the LLM instance and ToolRegistry are shared. Cloning the registry itself might be complex and depends on the registry implementation. Sharing the registry is usually acceptable.
func (*ReAct) Process ¶
func (r *ReAct) Process(ctx context.Context, inputs map[string]any, opts ...core.Option) (map[string]any, error)
Process executes the ReAct loop.
func (*ReAct) SetLLM ¶
func (r *ReAct) SetLLM(llm core.LLM)
SetLLM sets the language model for the base module and all internal modules (Predict and Extract).
func (*ReAct) WithDefaultOptions ¶ added in v0.17.1
func (r *ReAct) WithDefaultOptions(opts ...core.Option) *ReAct
WithDefaultOptions sets default options by configuring the underlying Predict module.
func (*ReAct) WithNativeFunctionCalling ¶ added in v0.70.0
func (r *ReAct) WithNativeFunctionCalling() *ReAct
WithNativeFunctionCalling enables native LLM function calling for action selection. This bypasses text-based XML parsing entirely by using the LLM's built-in function/tool calling capabilities (e.g., OpenAI function calling, Gemini tools).
Benefits:
- Eliminates parsing errors and hallucinated observations
- Strongly typed tool arguments from the LLM
- More reliable tool selection
Requirements:
- The LLM must support CapabilityToolCalling
- Falls back to text-based parsing if not supported
Usage:
    react := modules.NewReAct(signature, registry, maxIters)
    react.WithNativeFunctionCalling() // Enable native function calling
func (*ReAct) WithNativeFunctionCallingConfig ¶ added in v0.70.0
func (r *ReAct) WithNativeFunctionCallingConfig(config interceptors.FunctionCallingConfig) *ReAct
WithNativeFunctionCallingConfig enables native function calling with custom configuration.
func (*ReAct) WithXMLParsing ¶ added in v0.54.0
func (r *ReAct) WithXMLParsing(config interceptors.XMLConfig) *ReAct
WithXMLParsing enables XML interceptor-based parsing for tool actions. This replaces the hardcoded XML parsing with configurable XML interceptors.
type Refine ¶ added in v0.30.0
type Refine struct {
core.BaseModule
// contains filtered or unexported fields
}
Refine implements a refinement module that runs predictions multiple times with varying temperatures to improve quality based on a reward function.
func NewRefine ¶ added in v0.30.0
func NewRefine(module core.Module, config RefineConfig) *Refine
NewRefine creates a new Refine module with the specified configuration.
func NewTypedRefine ¶ added in v0.61.0
func NewTypedRefine[TInput, TOutput any](module core.Module, config RefineConfig) *Refine
NewTypedRefine creates a new type-safe Refine module from a typed signature. Typed modules use text-based parsing by default since they typically rely on prefixes.
func (*Refine) GetConfig ¶ added in v0.30.0
func (r *Refine) GetConfig() RefineConfig
GetConfig returns the current refinement configuration.
func (*Refine) GetSignature ¶ added in v0.30.0
func (r *Refine) GetSignature() core.Signature
GetSignature returns the module's signature.
func (*Refine) GetWrappedModule ¶ added in v0.30.0
func (r *Refine) GetWrappedModule() core.Module
GetWrappedModule returns the underlying module being refined.
func (*Refine) Process ¶ added in v0.30.0
func (r *Refine) Process(ctx context.Context, inputs map[string]interface{}, opts ...core.Option) (map[string]interface{}, error)
Process executes the refinement logic by running the module multiple times with different temperatures and selecting the best result based on the reward function.
func (*Refine) SetSignature ¶ added in v0.30.0
func (r *Refine) SetSignature(signature core.Signature)
SetSignature updates the signature for both this module and the wrapped module.
func (*Refine) UpdateConfig ¶ added in v0.30.0
func (r *Refine) UpdateConfig(config RefineConfig) *Refine
UpdateConfig allows updating the refinement configuration.
func (*Refine) WithDefaultOptions ¶ added in v0.30.0
func (r *Refine) WithDefaultOptions(opts ...core.Option) *Refine
WithDefaultOptions sets default options for the module.
type RefineConfig ¶ added in v0.30.0
type RefineConfig struct {
// Number of refinement attempts
N int
// Reward function to evaluate predictions
RewardFn RewardFunction
// Minimum threshold for acceptable predictions
Threshold float64
// Number of failed attempts before giving up (optional)
FailCount *int
}
RefineConfig holds configuration options for the Refine module.
type RewardFunction ¶ added in v0.30.0
RewardFunction represents a function that evaluates the quality of a prediction. It takes the inputs used and the outputs produced, and returns a reward score. Higher scores indicate better predictions.
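Putting RefineConfig and RewardFunction together might look like the sketch below. The exact RewardFunction signature is not shown on this page, so the func(inputs, outputs map[string]interface{}) float64 shape is an assumption based on the description above, and "answer" is a placeholder output field:

    // Assumed reward shape: score the outputs produced for the given inputs.
    rewardFn := func(inputs, outputs map[string]interface{}) float64 {
        answer, _ := outputs["answer"].(string) // placeholder output field
        if answer == "" {
            return 0.0
        }
        return 1.0 // replace with any heuristic or model-based score
    }

    refine := modules.NewRefine(modules.NewPredict(signature), modules.RefineConfig{
        N:         5,   // up to five attempts at varying temperatures
        RewardFn:  rewardFn,
        Threshold: 0.8, // minimum acceptable reward
    })
    refine.SetLLM(llm)

    best, err := refine.Process(ctx, map[string]interface{}{"question": "..."})
    if err != nil {
        // handle error
    }
    fmt.Println(best["answer"])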