mirror of https://github.com/IBM/fp-go.git synced 2025-12-19 23:42:05 +02:00

Compare commits


68 Commits

Author SHA1 Message Date
Dr. Carsten Leue
255cf4353c fix: better formatting
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-18 16:07:26 +01:00
Dr. Carsten Leue
4dfc1b5a44 fix: better doc and implementation of retry
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-17 16:28:28 +01:00
Dr. Carsten Leue
20398e67a9 fix: better doc and implementation of retry
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-17 15:58:11 +01:00
Dr. Carsten Leue
fceda15701 doc: improve docs
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-17 10:11:58 +01:00
Dr. Carsten Leue
4ebfcadabe fix: add better tests
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-16 14:03:01 +01:00
Dr. Carsten Leue
acb601fc01 fix: reuse some more code
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-15 16:30:40 +01:00
Dr. Carsten Leue
d17663f016 fix: better doc
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-15 11:16:09 +01:00
Dr. Carsten Leue
829365fc24 doc: improve docs
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-12 13:30:10 +01:00
Dr. Carsten Leue
64b5660b4e doc: remove some comments
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-12 12:35:53 +01:00
Dr. Carsten Leue
16e82d6a65 fix: better cancellation support
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-12 11:52:43 +01:00
Dr. Carsten Leue
0d40fdcebb fix: implement tail recursion
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-12 11:18:32 +01:00
Dr. Carsten Leue
6a4dfa2c93 fix: better doc
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-11 16:18:55 +01:00
Dr. Carsten Leue
a37f379a3c fix: semantic of MapTo and ChainTo and update tests
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-11 09:09:44 +01:00
Dr. Carsten Leue
ece0cd135d fix: add more tests and logging
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-10 18:23:19 +01:00
Dr. Carsten Leue
739b6a284c fix: better slog based logging
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-09 17:52:57 +01:00
Dr. Carsten Leue
ba10d8d314 doc: fix docs
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-09 13:00:03 +01:00
Dr. Carsten Leue
3d6c419185 fix: add better logging
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-09 12:49:44 +01:00
Dr. Carsten Leue
3f4b6292e4 fix: optimize Traverse
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-05 21:35:05 +01:00
Dr. Carsten Leue
b1704b6d26 fix: implement TraverseReader
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-05 17:51:13 +01:00
Dr. Carsten Leue
ffdfd218f8 fix: implement Flip for Reader
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-05 11:04:49 +01:00
Dr. Carsten Leue
34826d8c52 fix: Ask and add tests to retry
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-04 16:47:53 +01:00
Dr. Carsten Leue
24c0519cc7 fix: try to unify type signatures
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-04 16:31:21 +01:00
Dr. Carsten Leue
ff48d8953e fix: implement some missing methods in reader io
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-04 13:50:25 +01:00
Dr. Carsten Leue
d739c9b277 fix: add doc to readerio
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-12-03 18:13:59 +01:00
Dr. Carsten Leue
f0054431a5 fix: add logging to readerio
2025-12-03 18:07:06 +01:00
Carsten Leue
1a89ec3df7 fix: implement Sequence for Pair
Signed-off-by: Carsten Leue <carsten.leue@de.ibm.com>
2025-11-28 11:22:23 +01:00
Carsten Leue
f652a94c3a fix: add template based logger
Signed-off-by: Carsten Leue <carsten.leue@de.ibm.com>
2025-11-28 10:11:08 +01:00
Dr. Carsten Leue
774db88ca5 fix: add name to prism
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-27 13:26:36 +01:00
Dr. Carsten Leue
62a3365b20 fix: add conversion prisms for numbers
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-27 13:12:18 +01:00
Dr. Carsten Leue
d9a16a6771 fix: add reduce operations to readerioresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-26 17:00:10 +01:00
Dr. Carsten Leue
8949cc7dca fix: expose stats
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-26 13:44:40 +01:00
Dr. Carsten Leue
fa6b6caf22 fix: generic order for reader.Flap
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-26 12:53:13 +01:00
Dr. Carsten Leue
a1e8d397c3 fix: better doc and some helpers
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-26 12:06:09 +01:00
Dr. Carsten Leue
dbe7102e43 fix: better doc and some helpers
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-26 12:05:31 +01:00
Dr. Carsten Leue
09aeb996e2 fix: add GetOrElseOf
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-24 18:57:30 +01:00
Dr. Carsten Leue
7cd575d95a fix: improve Prism and Optional
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-24 18:22:52 +01:00
Dr. Carsten Leue
dcfb023891 fix: improve assertions
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-24 17:28:48 +01:00
Dr. Carsten Leue
51cf241a26 fix: add ReaderK
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-24 12:29:55 +01:00
Dr. Carsten Leue
9004c93976 fix: add some idomatic helpers
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-24 10:40:58 +01:00
Dr. Carsten Leue
d8ab6b0ce5 fix: ChainReaderK
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-22 10:39:56 +01:00
Dr. Carsten Leue
4e9998b645 fix: benchmarks and better docs
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 15:39:41 +01:00
Dr. Carsten Leue
2ea9e292e1 fix: idiomatic/readeresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 15:25:59 +01:00
Dr. Carsten Leue
12a20e30d1 fix: implement BindReaderK
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 13:01:27 +01:00
Dr. Carsten Leue
4909ad5473 fix: add missing monoid
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 10:22:50 +01:00
Dr. Carsten Leue
d116317cde fix: add readerresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 10:04:28 +01:00
Dr. Carsten Leue
1428241f2c fix: race condition
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-21 08:36:07 +01:00
Dr. Carsten Leue
ef9216bad7 fix: documentation, tests, some utilities
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-20 08:43:15 +01:00
Dr. Carsten Leue
fe77c770b6 fix: cleanup types
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-19 17:36:49 +01:00
Dr. Carsten Leue
1c42b2ac1d fix: implement idiomatic/ioresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-19 15:39:02 +01:00
Dr. Carsten Leue
cbd93fdecc fix: add statereaderioresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-18 17:54:04 +01:00
Dr. Carsten Leue
6d94697128 fix: document statereaderioeither
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-18 16:06:56 +01:00
Dr. Carsten Leue
77dde302ef Merge branch 'main' of github.com:IBM/fp-go
2025-11-18 10:59:57 +01:00
Dr. Carsten Leue
909d626019 fix: serveral performance improvements
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-18 10:58:24 +01:00
renovate[bot]
b01a8f2aff chore(deps): update actions/checkout action to v4.3.1 (#145)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-18 06:31:59 +00:00
Dr. Carsten Leue
8a2e9539b1 fix: add result
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-17 20:36:06 +01:00
Dr. Carsten Leue
03d9720a29 fix: optimize performance for option
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-17 12:19:24 +01:00
Dr. Carsten Leue
57794ccb34 fix: add idiomatic go options package
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-17 11:10:27 +01:00
Dr. Carsten Leue
404eb875d3 fix: add idiomatic version
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-16 17:27:16 +01:00
Dr. Carsten Leue
ed108812d6 fix: modernize codebase
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-15 17:00:22 +01:00
Dr. Carsten Leue
ab868315d4 fix: traverse
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-15 12:13:37 +01:00
Dr. Carsten Leue
02d0be9dad fix: add traversal for sequences
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-14 14:12:44 +01:00
Dr. Carsten Leue
2c1d8196b4 fix: support go iterators and cleanup types
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-14 12:56:12 +01:00
Dr. Carsten Leue
17eb8ae66f fix: add Chain...Left methods
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 16:51:15 +01:00
Dr. Carsten Leue
b70e481e7d fix: some minor improvements
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 12:56:51 +01:00
Dr. Carsten Leue
3c3bb7c166 fix: improve lens implementation
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 12:15:52 +01:00
Dr. Carsten Leue
d3007cbbfa fix: improve lens generator
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 09:39:18 +01:00
Dr. Carsten Leue
5aa0e1ea2e fix: handle non comparable types
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 09:35:56 +01:00
Dr. Carsten Leue
d586428cb0 fix: examples
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2025-11-13 09:05:57 +01:00
706 changed files with 111162 additions and 6369 deletions

View File

@@ -28,7 +28,7 @@ jobs:
      fail-fast: false # Continue with other versions if one fails
    steps:
      # full checkout for semantic-release
-     - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+     - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
        with:
          fetch-depth: 0
      - name: Set up Go ${{ matrix.go-version }}
@@ -66,7 +66,7 @@ jobs:
      matrix:
        go-version: ['1.24.x', '1.25.x']
    steps:
-     - uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+     - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
        with:
          fetch-depth: 0
      - name: Set up Go ${{ matrix.go-version }}
@@ -126,7 +126,7 @@ jobs:
    steps:
      # full checkout for semantic-release
      - name: Full checkout
-       uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
+       uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1
        with:
          fetch-depth: 0

View File

@@ -0,0 +1,17 @@
{
"permissions": {
"allow": [
"Bash(ls -la \"c:\\d\\fp-go\\v2\\internal\\monad\"\" && ls -la \"c:dfp-gov2internalapplicative\"\")",
"Bash(ls -la \"c:\\d\\fp-go\\v2\\internal\\chain\"\" && ls -la \"c:dfp-gov2internalfunctor\"\")",
"Bash(go build:*)",
"Bash(go test:*)",
"Bash(go doc:*)",
"Bash(go tool cover:*)",
"Bash(sort:*)",
"Bash(tee:*)",
"Bash(find:*)"
],
"deny": [],
"ask": []
}
}

v2/BENCHMARK_COMPARISON.md (new file, 482 lines)
View File

@@ -0,0 +1,482 @@
# Benchmark Comparison: Idiomatic vs Standard Either/Result
**Date:** 2025-11-18
**System:** AMD Ryzen 7 PRO 7840U w/ Radeon 780M Graphics (16 cores)
**Go Version:** go1.23+
This document provides a detailed performance comparison between the `either` package, after its recent optimizations, and the `idiomatic/result` package.
## Executive Summary
After optimizations to the `either` package, the performance characteristics have changed significantly:
### Key Findings
1. **Constructors & Predicates**: Both packages now perform comparably (~1-2 ns/op) with **zero heap allocations**
2. **Zero-allocation insight**: The `Either` struct (24 bytes) does NOT escape to heap - Go returns it by value on the stack
3. **Core Operations**: Idiomatic package has a **consistent advantage** of 1.2x - 2.3x for most operations
4. **Complex Operations**: Idiomatic package shows **massive advantages**:
- ChainFirst (Right): **32.4x faster** (87.6 ns → 2.7 ns, 72 B → 0 B)
- Pipeline operations: **2-3x faster** with lower allocations
5. **All simple operations**: Both maintain **zero heap allocations** (0 B/op, 0 allocs/op)
### Winner by Category
| Category | Winner | Reason |
|----------|--------|--------|
| Constructors | **TIE** | Both ~1.3-1.8 ns/op |
| Predicates | **TIE** | Both ~1.2-1.5 ns/op |
| Simple Transformations | **Idiomatic** | 1.2-2x faster |
| Monadic Operations | **Idiomatic** | 1.2-2.3x faster |
| Complex Chains | **Idiomatic** | 32x faster, zero allocs |
| Pipelines | **Idiomatic** | 2-2.4x faster, fewer allocs |
| Extraction | **Idiomatic** | 6x faster (GetOrElse) |
## Detailed Benchmark Results
### Constructor Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Left | 1.76 | **1.35** | **1.3x** ✓ | 0 B/op | 0 B/op |
| Right | 1.38 | 1.43 | 1.0x | 0 B/op | 0 B/op |
| Of | 1.68 | **1.22** | **1.4x** ✓ | 0 B/op | 0 B/op |
**Analysis:** Both packages perform extremely well with **zero heap allocations**. Idiomatic has a slight edge on Left and Of.
**Important Clarification: Neither Package Escapes to Heap**
A common misconception is that struct-based Either escapes to heap while tuples stay on stack. The benchmarks prove this is FALSE:
```go
// Either package - NO heap allocation
type Either[E, A any] struct {
r A // 8 bytes
l E // 8 bytes
isLeft bool // 1 byte + 7 padding
} // Total: 24 bytes
func Of[E, A any](value A) Either[E, A] {
return Right[E](value) // Returns 24-byte struct BY VALUE
}
// Benchmark result: 0 B/op, 0 allocs/op ✓
```
**Why Either doesn't escape:**
1. **Small struct** - At 24 bytes, it's below Go's escape threshold (~64 bytes)
2. **Return by value** - Go returns small structs on the stack
3. **Inlining** - The `//go:inline` directive eliminates function overhead
4. **No pointers** - No pointer escapes in normal usage
**Idiomatic package:**
```go
// Returns native tuple - always stack allocated
func Right[A any](a A) (A, error) {
return a, nil // 16 bytes total (8 + 8)
}
// Benchmark result: 0 B/op, 0 allocs/op ✓
```
**Both achieve zero allocations** - the performance difference comes from other factors like function composition overhead, not from constructor allocations.
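As a cross-check, this is roughly how such zero-allocation claims can be verified with Go's testing package; a minimal sketch, where the benchmark name and the `sink` variable are illustrative and not part of the repository:
```go
package either_test

import (
	"testing"

	"github.com/IBM/fp-go/v2/either"
)

// sink prevents the compiler from optimizing the benchmarked call away.
var sink either.Either[error, int]

// BenchmarkOfAllocs is an illustrative benchmark: with b.ReportAllocs enabled,
// a stack-only constructor shows up as 0 B/op and 0 allocs/op in the output.
func BenchmarkOfAllocs(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// Of returns the 24-byte Either struct by value; nothing escapes.
		sink = either.Of[error](i)
	}
}
```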
### Predicate Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| IsLeft | 1.45 | **1.35** | **1.1x** ✓ | 0 B/op | 0 B/op |
| IsRight | 1.47 | 1.51 | 1.0x | 0 B/op | 0 B/op |
**Analysis:** Virtually identical performance. The optimizations brought them to parity.
### Fold Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| MonadFold (Right) | 2.71 | - | - | 0 B/op | - |
| MonadFold (Left) | 2.26 | - | - | 0 B/op | - |
| Fold (Right) | 4.03 | **2.75** | **1.5x** ✓ | 0 B/op | 0 B/op |
| Fold (Left) | 3.69 | **2.40** | **1.5x** ✓ | 0 B/op | 0 B/op |
**Analysis:** Idiomatic package is 1.5x faster for curried Fold operations.
### Unwrap Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Note |
|-----------|----------------|-------------------|------|
| Unwrap (Right) | 1.27 | N/A | Either-specific |
| Unwrap (Left) | 1.24 | N/A | Either-specific |
| UnwrapError (Right) | 1.27 | N/A | Either-specific |
| UnwrapError (Left) | 1.27 | N/A | Either-specific |
| ToError (Right) | N/A | 1.40 | Idiomatic-specific |
| ToError (Left) | N/A | 1.84 | Idiomatic-specific |
**Analysis:** Both provide fast unwrapping. Idiomatic's tuple return is naturally unwrapped.
### Map Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| MonadMap (Right) | 2.96 | - | - | 0 B/op | - |
| MonadMap (Left) | 1.99 | - | - | 0 B/op | - |
| Map (Right) | 5.13 | **4.34** | **1.2x** ✓ | 0 B/op | 0 B/op |
| Map (Left) | 4.19 | **2.48** | **1.7x** ✓ | 0 B/op | 0 B/op |
| MapLeft (Right) | 3.93 | **2.22** | **1.8x** ✓ | 0 B/op | 0 B/op |
| MapLeft (Left) | 7.22 | **3.51** | **2.1x** ✓ | 0 B/op | 0 B/op |
**Analysis:** Idiomatic is consistently faster across all Map variants, especially for error path (Left).
### BiMap Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| BiMap (Right) | 16.79 | **3.82** | **4.4x** ✓ | 0 B/op | 0 B/op |
| BiMap (Left) | 11.47 | **3.47** | **3.3x** ✓ | 0 B/op | 0 B/op |
**Analysis:** Idiomatic package shows significant advantage for BiMap operations (3-4x faster).
### Chain (Monadic Bind) Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| MonadChain (Right) | 2.89 | - | - | 0 B/op | - |
| MonadChain (Left) | 2.03 | - | - | 0 B/op | - |
| Chain (Right) | 5.44 | **2.34** | **2.3x** ✓ | 0 B/op | 0 B/op |
| Chain (Left) | 4.44 | **2.53** | **1.8x** ✓ | 0 B/op | 0 B/op |
| ChainFirst (Right) | 87.62 | **2.71** | **32.4x** ✓✓✓ | 72 B, 3 allocs | 0 B, 0 allocs |
| ChainFirst (Left) | 3.94 | **2.48** | **1.6x** ✓ | 0 B/op | 0 B/op |
**Analysis:**
- Idiomatic is 2x faster for standard Chain operations
- **ChainFirst shows the most dramatic difference**: 32.4x faster with zero allocations vs 72 bytes!
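To make the ChainFirst numbers above concrete, here is a minimal sketch of what the operation does, assuming the curried `either.ChainFirst` form listed in the table; the `User` type and the `persistUser` step are hypothetical placeholders:
```go
package main

import (
	"fmt"

	"github.com/IBM/fp-go/v2/either"
)

type User struct{ Name string }

// persistUser is a hypothetical effectful step whose result is not needed further.
func persistUser(u User) either.Either[error, string] {
	return either.Right[error]("stored " + u.Name)
}

func main() {
	// ChainFirst runs persistUser for its effect but keeps the original User value.
	persist := either.ChainFirst(persistUser)

	// Right path: persistUser runs, the original user is passed through.
	fmt.Println(persist(either.Right[error](User{Name: "Alice"})))

	// Left path: the error is propagated and persistUser never runs.
	fmt.Println(persist(either.Left[User](fmt.Errorf("user not found"))))
}
```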
### Flatten Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Note |
|-----------|----------------|-------------------|------|
| Flatten (Right) | 8.73 | N/A | Either-specific nested structure |
| Flatten (Left) | 8.86 | N/A | Either-specific nested structure |
**Analysis:** Flatten is specific to Either's nested structure handling.
### Applicative Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| MonadAp (RR) | 3.81 | - | - | 0 B/op | - |
| MonadAp (RL) | 3.07 | - | - | 0 B/op | - |
| MonadAp (LR) | 3.08 | - | - | 0 B/op | - |
| Ap (RR) | 6.99 | - | - | 0 B/op | - |
**Analysis:** MonadAp is fast in Either. Idiomatic package doesn't expose direct Ap benchmarks.
### Alternative Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Alt (RR) | 5.72 | **2.40** | **2.4x** ✓ | 0 B/op | 0 B/op |
| Alt (LR) | 4.89 | **2.39** | **2.0x** ✓ | 0 B/op | 0 B/op |
| OrElse (Right) | 5.28 | **2.40** | **2.2x** ✓ | 0 B/op | 0 B/op |
| OrElse (Left) | 3.99 | **2.42** | **1.6x** ✓ | 0 B/op | 0 B/op |
**Analysis:** Idiomatic package is consistently 2x faster for alternative operations.
### GetOrElse Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| GetOrElse (Right) | 9.01 | **1.49** | **6.1x** ✓✓ | 0 B/op | 0 B/op |
| GetOrElse (Left) | 6.35 | **2.08** | **3.1x** ✓✓ | 0 B/op | 0 B/op |
**Analysis:** Idiomatic package shows dramatic advantage for value extraction (3-6x faster).
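For context, GetOrElse is the extraction step these rows measure. A minimal sketch, assuming the curried form in which the handler maps the Left error to a fallback value:
```go
package main

import (
	"fmt"

	"github.com/IBM/fp-go/v2/either"
)

func main() {
	// Collapse an Either[error, int] into a plain int, falling back to 0 on Left.
	fallbackToZero := either.GetOrElse(func(err error) int { return 0 })

	fmt.Println(fallbackToZero(either.Right[error](42)))                  // 42
	fmt.Println(fallbackToZero(either.Left[int](fmt.Errorf("no value")))) // 0
}
```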
### TryCatch Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Note |
|-----------|----------------|-------------------|------|
| TryCatch (Success) | 2.39 | N/A | Either-specific |
| TryCatch (Error) | 3.40 | N/A | Either-specific |
| TryCatchError (Success) | 3.32 | N/A | Either-specific |
| TryCatchError (Error) | 6.44 | N/A | Either-specific |
**Analysis:** TryCatch/TryCatchError are Either-specific for wrapping (value, error) tuples.
### Other Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Swap (Right) | 2.30 | - | - | 0 B/op | - |
| Swap (Left) | 3.05 | - | - | 0 B/op | - |
| MapTo (Right) | - | 1.60 | - | - | 0 B/op |
| MapTo (Left) | - | 1.73 | - | - | 0 B/op |
| ChainTo (Right) | - | 2.66 | - | - | 0 B/op |
| ChainTo (Left) | - | 2.85 | - | - | 0 B/op |
| Reduce (Right) | - | 2.34 | - | - | 0 B/op |
| Reduce (Left) | - | 1.40 | - | - | 0 B/op |
| Flap (Right) | - | 3.86 | - | - | 0 B/op |
| Flap (Left) | - | 2.58 | - | - | 0 B/op |
### FromPredicate Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| FromPredicate (Pass) | - | 3.38 | - | - | 0 B/op |
| FromPredicate (Fail) | - | 5.03 | - | - | 0 B/op |
**Analysis:** FromPredicate in idiomatic shows good performance for validation patterns.
### Option Conversion
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| ToOption (Right) | - | 1.17 | - | - | 0 B/op |
| ToOption (Left) | - | 1.21 | - | - | 0 B/op |
| FromOption (Some) | - | 2.68 | - | - | 0 B/op |
| FromOption (None) | - | 3.72 | - | - | 0 B/op |
**Analysis:** Very fast conversion between Result and Option in idiomatic package.
## Pipeline Benchmarks
These benchmarks measure realistic composition scenarios using F.Pipe.
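For orientation, a minimal sketch of the kind of composition these pipeline benchmarks exercise; the concrete steps (`double`, `describe`) are placeholders, not the benchmark bodies themselves:
```go
package main

import (
	"fmt"

	"github.com/IBM/fp-go/v2/either"
	F "github.com/IBM/fp-go/v2/function"
)

// double and describe are placeholder Kleisli steps used only to show the shape.
func double(n int) either.Either[error, int] {
	return either.Right[error](n * 2)
}

func describe(n int) either.Either[error, string] {
	return either.Right[error](fmt.Sprintf("value=%d", n))
}

func main() {
	// An Either value threaded through curried operators with F.Pipe2, the shape
	// the pipeline benchmarks below measure.
	result := F.Pipe2(
		either.Of[error](21),
		either.Chain(double),
		either.Chain(describe),
	)
	fmt.Println(result)
}
```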
### Simple Map Pipeline
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Pipeline Map (Right) | 112.7 | **46.5** | **2.4x** ✓ | 72 B, 3 allocs | 48 B, 2 allocs |
| Pipeline Map (Left) | 116.8 | **47.2** | **2.5x** ✓ | 72 B, 3 allocs | 48 B, 2 allocs |
### Chain Pipeline
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Pipeline Chain (Right) | 74.4 | **26.1** | **2.9x** ✓ | 48 B, 2 allocs | 24 B, 1 allocs |
| Pipeline Chain (Left) | 86.4 | **25.7** | **3.4x** ✓ | 48 B, 2 allocs | 24 B, 1 allocs |
### Complex Pipeline (Map → Chain → Map)
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Complex (Right) | 279.8 | **116.3** | **2.4x** ✓ | 192 B, 8 allocs | 120 B, 5 allocs |
| Complex (Left) | 288.1 | **115.8** | **2.5x** ✓ | 192 B, 8 allocs | 120 B, 5 allocs |
**Analysis:**
- Idiomatic package shows **2-3.4x speedup** for realistic pipelines
- Significantly fewer allocations in all pipeline scenarios
- The gap widens as pipelines become more complex
## Array/Collection Operations
### TraverseArray
| Operation | Either (ns/op) | Idiomatic (ns/op) | Note |
|-----------|----------------|-------------------|------|
| TraverseArray (Success) | - | 32.3 | 48 B, 1 alloc |
| TraverseArray (Error) | - | 28.3 | 48 B, 1 alloc |
**Analysis:** Idiomatic package provides efficient array traversal with minimal allocations.
## Validation (ApV)
### ApV Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| ApV (BothRight) | - | 1.17 | - | - | 0 B/op |
| ApV (BothLeft) | - | 141.5 | - | - | 48 B, 2 allocs |
**Analysis:** Idiomatic's validation applicative shows fast success path, with allocations only when accumulating errors.
## String Formatting
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| String/ToString (Right) | 139.9 | **81.8** | **1.7x** ✓ | 16 B, 1 alloc | 16 B, 1 alloc |
| String/ToString (Left) | 161.6 | **72.7** | **2.2x** ✓ | 48 B, 1 alloc | 24 B, 1 alloc |
**Analysis:** Idiomatic package formats strings faster with fewer allocations for Left values.
## Do-Notation
| Operation | Either (ns/op) | Idiomatic (ns/op) | Note |
|-----------|----------------|-------------------|------|
| Do | 2.03 | - | Either-specific |
| Bind | 153.4 | - | 96 B, 4 allocs |
| Let | 33.5 | - | 16 B, 1 alloc |
**Analysis:** Do-notation is specific to Either package for monadic composition patterns.
## Summary Statistics
### Simple Operations (< 10 ns/op)
**Either Package:**
- Count: 24 operations
- Average: 3.2 ns/op
- Range: 1.24 - 9.01 ns/op
**Idiomatic Package:**
- Count: 36 operations
- Average: 2.1 ns/op
- Range: 1.17 - 5.03 ns/op
**Winner:** Idiomatic (1.5x faster average)
### Complex Operations (Pipelines, allocations)
**Either Package:**
- Pipeline Map: 112.7 ns/op (72 B, 3 allocs)
- Pipeline Chain: 74.4 ns/op (48 B, 2 allocs)
- Complex: 279.8 ns/op (192 B, 8 allocs)
- ChainFirst: 87.6 ns/op (72 B, 3 allocs)
**Idiomatic Package:**
- Pipeline Map: 46.5 ns/op (48 B, 2 allocs)
- Pipeline Chain: 26.1 ns/op (24 B, 1 allocs)
- Complex: 116.3 ns/op (120 B, 5 allocs)
- ChainFirst: 2.71 ns/op (0 B, 0 allocs)
**Winner:** Idiomatic (2-32x faster, significantly fewer allocations)
### Allocation Analysis
**Either Package:**
- Zero-allocation operations: Most simple operations
- Operations with allocations: Pipelines, Bind, Do-notation, ChainFirst
**Idiomatic Package:**
- Zero-allocation operations: Almost all operations except pipelines and validation
- Significantly fewer allocations in pipeline scenarios
- ChainFirst: **Zero allocations** (vs 72 B in Either)
## Performance Characteristics
### Where Either Package Excels
1. **Comparable to Idiomatic**: After optimizations, Either matches Idiomatic for constructors and predicates
2. **Feature Richness**: More operations (Do-notation, Bind, Let, Flatten, Swap)
3. **Type Flexibility**: Full Either[E, A] with custom error types
### Where Idiomatic Package Excels
1. **Core Operations**: 1.2-2.3x faster for Map, Chain, Fold
2. **Complex Operations**: 32x faster for ChainFirst
3. **Pipelines**: 2-3.4x faster with fewer allocations
4. **Extraction**: 3-6x faster for GetOrElse
5. **Alternative**: 2x faster for Alt/OrElse
6. **BiMap**: 3-4x faster
7. **Consistency**: More predictable performance profile
## Real-World Performance Impact
### Hot Path Example (1 million operations)
```go
// Map operation (very common)
// Either: 5.13 ns/op × 1M = 5.13 ms
// Idiomatic: 4.34 ns/op × 1M = 4.34 ms
// Savings: 0.79 ms per million operations
// Chain operation (common in pipelines)
// Either: 5.44 ns/op × 1M = 5.44 ms
// Idiomatic: 2.34 ns/op × 1M = 2.34 ms
// Savings: 3.10 ms per million operations
// Pipeline Complex (realistic composition)
// Either: 279.8 ns/op × 1M = 279.8 ms
// Idiomatic: 116.3 ns/op × 1M = 116.3 ms
// Savings: 163.5 ms per million operations
```
### Memory Impact
For 1 million ChainFirst operations:
- Either: 72 MB allocated
- Idiomatic: 0 MB allocated
- **Savings: 72 MB + reduced GC pressure**
## Recommendations
### Use Idiomatic Package When:
1. **Performance is Critical**
- Hot paths in your application
- High-throughput services (>10k req/s)
- Complex operation chains
- Memory-constrained environments
2. **Natural Go Integration**
- Working with stdlib (value, error) patterns
- Team familiar with Go idioms
- Simple migration from existing code
- Want zero-cost abstractions
3. **Pipeline-Heavy Code**
- 2-3.4x faster pipelines
- Significantly fewer allocations
- Better CPU cache utilization
### Use Either Package When:
1. **Feature Requirements**
- Need custom error types (Either[E, A])
- Using Do-notation for complex compositions
- Need Flatten, Swap, or other Either-specific operations
- Porting from FP languages (Scala, Haskell)
2. **Type Safety Over Performance**
- Explicit Either semantics
- Algebraic data type guarantees
- Teaching/learning FP concepts
3. **Moderate Performance Needs**
- After optimizations, Either is quite fast
- Difference matters only at high scale
- Code clarity > micro-optimizations
### Hybrid Approach
```go
// Use Either for complex type safety
import "github.com/IBM/fp-go/v2/either"
type ValidationError struct { Field, Message string }
validated := either.Either[ValidationError, Input]{...}
// Convert to Idiomatic for hot path
import "github.com/IBM/fp-go/v2/idiomatic/result"
value, err := either.UnwrapError(either.MapLeft(toError)(validated))
processed, err := result.Chain(hotPathProcessing)(value, err)
```
## Conclusion
After optimizations to the Either package:
1. **Both packages achieve zero heap allocations for constructors** - The Either struct (24 bytes) does NOT escape to heap
2. **Simple operations** are now **comparable** between both packages (~1-2 ns/op, 0 B/op)
3. **Core transformations** favor Idiomatic by **1.2-2.3x**
4. **Complex operations** heavily favor Idiomatic by **2-32x**
5. **Memory efficiency** strongly favors Idiomatic (especially ChainFirst: 72 B → 0 B)
6. **Real-world pipelines** show **2-3.4x speedup** with Idiomatic
### Key Insight: No Heap Escape Myth
A critical finding: **Both packages avoid heap allocations for simple operations.** The Either struct is small enough (24 bytes) that Go returns it by value on the stack, not the heap. The `0 B/op, 0 allocs/op` benchmarks confirm this.
The performance differences come from:
- **Function composition overhead** in complex operations
- **Currying and closure creation** in pipelines
- **Tuple simplicity** vs struct field access
Not from constructor allocations—both are equally efficient there.
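A small sketch of where that composition overhead shows up, contrasting the curried operator (which builds a closure before it is applied) with the Monad-prefixed direct form:
```go
package main

import (
	"fmt"

	"github.com/IBM/fp-go/v2/either"
)

func main() {
	step := func(n int) either.Either[error, int] { return either.Right[error](n * 2) }
	value := either.Of[error](21)

	// Curried form: either.Chain(step) first builds an Operator closure, then
	// applies it. In longer pipelines this closure creation adds up.
	viaOperator := either.Chain(step)(value)

	// Direct form: MonadChain takes the value and the function at once, with no
	// intermediate closure.
	viaMonad := either.MonadChain(value, step)

	fmt.Println(viaOperator, viaMonad)
}
```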
### Final Verdict
The idiomatic package provides a compelling performance advantage for production workloads while maintaining zero-cost functional programming abstractions. The Either package remains excellent for type safety, feature richness, and scenarios where explicit Either[E, A] semantics are valuable.
**Bottom Line:**
- For **high-performance Go services**: idiomatic package is the clear winner (1.2-32x faster)
- For **type-safe, feature-rich FP**: Either package is excellent (comparable simple ops, more features)
- **Both avoid heap allocations** for constructors—choose based on your performance vs features trade-off

View File

@@ -0,0 +1,344 @@
# Deep Chaining Performance Analysis
## Executive Summary
The **only remaining performance gap** between `v2/option` and `idiomatic/option` is in **deep chaining operations** (multiple sequential transformations). This document demonstrates the problem, explains the root cause, and provides recommendations.
## Benchmark Results
### v2/option (Struct-based)
```
BenchmarkChain_3Steps 8.17 ns/op 0 allocs
BenchmarkChain_5Steps 16.57 ns/op 0 allocs
BenchmarkChain_10Steps 47.01 ns/op 0 allocs
BenchmarkMap_5Steps 0.28 ns/op 0 allocs ⚡
```
### idiomatic/option (Tuple-based)
```
BenchmarkChain_3Steps 0.22 ns/op 0 allocs ⚡
BenchmarkChain_5Steps 0.22 ns/op 0 allocs ⚡
BenchmarkChain_10Steps 0.21 ns/op 0 allocs ⚡
BenchmarkMap_5Steps 0.22 ns/op 0 allocs ⚡
```
### Performance Comparison
| Steps | v2/option | idiomatic/option | Slowdown |
|-------|-----------|------------------|----------|
| 3 | 8.17 ns | 0.22 ns | **37x slower** |
| 5 | 16.57 ns | 0.22 ns | **75x slower** |
| 10 | 47.01 ns | 0.21 ns | **224x slower** |
**Key Finding**: The performance gap **increases linearly** with chain depth!
---
## Visual Example: The Problem
### Scenario: Processing User Input
```go
// Process user input through multiple validation steps
input := "42"
// v2/option - Nested MonadChain
result := MonadChain(
MonadChain(
MonadChain(
Some(input),
validateNotEmpty, // Step 1
),
parseToInt, // Step 2
),
validateRange, // Step 3
)
```
### What Happens Under the Hood
#### v2/option (Struct Construction Overhead)
```go
// Step 0: Initial value
Some(input)
// Creates: Option[string]{value: "42", isSome: true}
// Memory: HEAP allocation
// Step 1: Validate not empty
MonadChain(opt, validateNotEmpty)
// Input: Option[string]{value: "42", isSome: true} ← Read from heap
// Output: Option[string]{value: "42", isSome: true} ← NEW heap allocation
// Memory: 2 heap allocations
// Step 2: Parse to int
MonadChain(opt, parseToInt)
// Input: Option[string]{value: "42", isSome: true} ← Read from heap
// Output: Option[int]{value: 42, isSome: true} ← NEW heap allocation
// Memory: 3 heap allocations
// Step 3: Validate range
MonadChain(opt, validateRange)
// Input: Option[int]{value: 42, isSome: true} ← Read from heap
// Output: Option[int]{value: 42, isSome: true} ← NEW heap allocation
// Memory: 4 heap allocations TOTAL
// Each step:
// 1. Reads Option struct from memory
// 2. Checks isSome field
// 3. Calls function
// 4. Creates NEW Option struct
// 5. Writes to memory
```
#### idiomatic/option (Zero Allocation)
```go
// Step 0: Initial value
s, ok := Some(input)
// Creates: ("42", true)
// Memory: STACK only (registers)
// Step 1: Validate not empty
v1, ok1 := Chain(validateNotEmpty)(s, ok)
// Input: ("42", true) ← Values in registers
// Output: ("42", true) ← Values in registers
// Memory: ZERO allocations
// Step 2: Parse to int
v2, ok2 := Chain(parseToInt)(v1, ok1)
// Input: ("42", true) ← Values in registers
// Output: (42, true) ← Values in registers
// Memory: ZERO allocations
// Step 3: Validate range
v3, ok3 := Chain(validateRange)(v2, ok2)
// Input: (42, true) ← Values in registers
// Output: (42, true) ← Values in registers
// Memory: ZERO allocations TOTAL
// Each step:
// 1. Reads values from registers (no memory access!)
// 2. Checks bool flag
// 3. Calls function
// 4. Returns new tuple (stays in registers)
// 5. Compiler optimizes everything away!
```
---
## Assembly-Level Difference
### v2/option - Struct Overhead
```asm
; Every chain step does:
MOV RAX, [heap_ptr] ; Load struct from heap
TEST BYTE [RAX+8], 1 ; Check isSome field
JZ none_case ; Branch if None
MOV RDI, [RAX] ; Load value from struct
CALL transform_func ; Call the function
CALL malloc ; Allocate new struct ⚠️
MOV [new_ptr], result ; Store result
MOV [new_ptr+8], 1 ; Set isSome = true
```
### idiomatic/option - Optimized Away
```asm
; All steps compiled to:
MOV EAX, 42 ; The final result!
; Everything else optimized away! ⚡
```
**Compiler insight**: With tuples, the Go compiler can:
1. **Inline everything** - No function call overhead
2. **Eliminate branches** - Constant propagation removes `if ok` checks
3. **Use registers only** - Values never touch memory
4. **Dead code elimination** - Removes unnecessary operations
---
## Real-World Example with Timings
### Example: User Registration Validation Chain
```go
// Validate: email → trim → lowercase → check format → check uniqueness
```
#### v2/option Performance
```go
func ValidateEmail_v2(email string) Option[string] {
return MonadChain(
MonadChain(
MonadChain(
MonadChain(
Some(email),
trimWhitespace, // ~2 ns
),
toLowerCase, // ~2 ns
),
validateFormat, // ~2 ns
),
checkUniqueness, // ~2 ns
)
}
// Total: ~8-16 ns (matches our 5-step benchmark: 16.57 ns)
```
#### idiomatic/option Performance
```go
func ValidateEmail_idiomatic(email string) (string, bool) {
v1, ok1 := Chain(trimWhitespace)(email, true)
v2, ok2 := Chain(toLowerCase)(v1, ok1)
v3, ok3 := Chain(validateFormat)(v2, ok2)
return Chain(checkUniqueness)(v3, ok3)
}
// Total: ~0.22 ns (entire chain optimized to single operation!)
```
**Impact**: For 1 million validations:
- v2/option: 16.57 ms
- idiomatic/option: 0.22 ms
- **Difference: 75x faster = saved 16.35 ms**
---
## Why Map is Fast in v2/option
Interestingly, `Map` (pure transformations) is **much faster** than `Chain`:
```
v2/option:
- BenchmarkChain_5Steps: 16.57 ns
- BenchmarkMap_5Steps: 0.28 ns ← 59x FASTER!
```
**Reason**: Map transformations can be **inlined and fused** by the compiler:
```go
// This:
Map(f5)(Map(f4)(Map(f3)(Map(f2)(Map(f1)(opt)))))
// Becomes (after compiler optimization):
Some(f5(f4(f3(f2(f1(value)))))) // Single struct construction!
// While Chain cannot be optimized the same way:
MonadChain(MonadChain(...)) // Must construct at each step
```
---
## When Does This Matter?
### ⚠️ **Rarely Critical** (99% of use cases)
Even 10-step chains only cost **47 nanoseconds**. For context:
- Database query: **~1,000,000 ns** (1 ms)
- HTTP request: **~10,000,000 ns** (10 ms)
- File I/O: **~100,000 ns** (0.1 ms)
**The 47 ns overhead is negligible compared to real I/O operations.**
### ⚡ **Can Matter** (High-throughput scenarios)
1. **In-memory data processing pipelines**
```go
// Processing 10 million records with 5-step validation
v2/option: 165 ms
idiomatic/option: 2 ms
Difference: 163 ms saved ⚡
```
2. **Real-time stream processing**
- Processing 100k events/second with chained transformations
- 16.57 ns × 100,000 = 1.66 ms vs 0.22 ns × 100,000 = 0.022 ms
- Can affect throughput for high-frequency trading, gaming, etc.
3. **Tight inner loops with chained logic**
```go
for i := 0; i < 1_000_000; i++ {
result := MonadChain(MonadChain(MonadChain(MonadChain(Some(data[i]), f1), f2), f3), f4)
}
// v2/option: 16 ms
// idiomatic: 0.22 ms
```
---
## Root Cause Summary
| Aspect | v2/option | idiomatic/option | Why? |
|--------|-----------|------------------|------|
| **Intermediate values** | `Option[T]` struct | `(T, bool)` tuple | Struct requires memory, tuple can use registers |
| **Memory allocation** | 1 per step | 0 total | Heap vs stack |
| **Compiler optimization** | Limited | Aggressive | Structs block inlining |
| **Cache impact** | Heap reads | Register-only | Memory bandwidth saved |
| **Branch prediction** | Struct checks | Optimized away | Compiler removes branches |
---
## Recommendations
### ✅ **Use v2/option When:**
- I/O-bound operations (database, network, files)
- User-facing applications (latency dominated by I/O)
- Need JSON marshaling, TryCatch, SequenceArray
- Chain depth < 5 steps (overhead < 20 ns - negligible)
- Code clarity > microsecond performance
### ✅ **Use idiomatic/option When:**
- CPU-bound data processing
- High-throughput stream processing
- Tight inner loops with chaining
- In-memory analytics
- Performance-critical paths
- Chain depth > 5 steps
### ✅ **Mitigation for v2/option:**
If you need v2/option but want better chain performance:
1. **Use Map instead of Chain** when possible:
```go
// Bad (16.57 ns):
MonadChain(MonadChain(MonadChain(opt, f1), f2), f3)
// Good (0.28 ns):
Map(f3)(Map(f2)(Map(f1)(opt)))
```
2. **Batch operations**:
```go
// Instead of chaining many steps:
validate := func(x T) Option[T] {
// Combine multiple checks in one function
if check1(x) && check2(x) && check3(x) {
return Some(transform(x))
}
return None[T]()
}
```
3. **Profile first**:
- Only optimize hot paths
- 47 ns is often acceptable
- Don't premature optimize
---
## Conclusion
**The deep chaining performance gap is:**
- ✅ **Real and measurable** (37-224x slower)
- ✅ **Well understood** (struct construction overhead)
- ⚠️ **Rarely critical** (nanosecond differences usually don't matter)
- ✅ **Easy to work around** (use Map, batch operations)
- ✅ **Worth it for the API benefits** (JSON, methods, helpers)
**For 99% of applications, v2/option's performance is excellent.** The gap only matters in specialized high-throughput scenarios where you should probably use idiomatic/option anyway.
The optimizations already applied (`//go:inline`, direct field access) brought v2/option to **competitive parity** for all practical purposes. The remaining gap is a **fundamental design trade-off**, not a fixable bug.

v2/DESIGN.md (new file, 574 lines)
View File

@@ -0,0 +1,574 @@
# Design Decisions
This document explains the key design decisions and principles behind fp-go's API design.
## Table of Contents
- [Data Last Principle](#data-last-principle)
- [Kleisli and Operator Types](#kleisli-and-operator-types)
- [Monadic Operations Comparison](#monadic-operations-comparison)
- [Type Parameter Ordering](#type-parameter-ordering)
- [Generic Type Aliases](#generic-type-aliases)
## Data Last Principle
fp-go follows the **"data last"** principle, where the data being operated on is always the last parameter in a function. This design choice enables powerful function composition and partial application patterns.
### What is "Data Last"?
In the "data last" style, functions are structured so that:
1. Configuration parameters come first
2. The data to be transformed comes last
This is the opposite of the traditional object-oriented style where the data (receiver) comes first.
### Why "Data Last"?
The "data last" principle enables:
1. **Natural Currying**: Functions can be partially applied to create specialized transformations
2. **Function Composition**: Operations can be composed before applying them to data
3. **Point-Free Style**: Write transformations without explicitly mentioning the data
4. **Reusability**: Create reusable transformation pipelines
### Examples
#### Basic Transformation
```go
// Data last style (fp-go)
double := array.Map(number.Mul(2))
result := double([]int{1, 2, 3}) // [2, 4, 6]
// Compare with data first style (traditional)
result := array.Map([]int{1, 2, 3}, number.Mul(2))
```
#### Function Composition
```go
import (
A "github.com/IBM/fp-go/v2/array"
F "github.com/IBM/fp-go/v2/function"
N "github.com/IBM/fp-go/v2/number"
)
// Create a pipeline of transformations
pipeline := F.Flow3(
A.Filter(func(x int) bool { return x > 0 }), // Keep positive numbers
A.Map(N.Mul(2)), // Double each number
A.Reduce(func(acc, x int) int { return acc + x }, 0), // Sum them up
)
// Apply the pipeline to different data
result1 := pipeline([]int{-1, 2, 3, -4, 5}) // (2 + 3 + 5) * 2 = 20
result2 := pipeline([]int{1, 2, 3}) // (1 + 2 + 3) * 2 = 12
```
#### Partial Application
```go
import (
O "github.com/IBM/fp-go/v2/option"
)
// Create specialized functions by partial application
getOrZero := O.GetOrElse(func() int { return 0 })
getOrEmpty := O.GetOrElse(func() string { return "" })
// Use them with different data
value1 := getOrZero(O.Some(42)) // 42
value2 := getOrZero(O.None[int]()) // 0
text1 := getOrEmpty(O.Some("hello")) // "hello"
text2 := getOrEmpty(O.None[string]()) // ""
```
#### Building Reusable Transformations
```go
import (
E "github.com/IBM/fp-go/v2/either"
O "github.com/IBM/fp-go/v2/option"
)
// Create a reusable validation pipeline
type User struct {
Name string
Email string
Age int
}
validateAge := E.FromPredicate(
func(u User) bool { return u.Age >= 18 },
func(u User) error { return errors.New("must be 18 or older") },
)
validateEmail := E.FromPredicate(
func(u User) bool { return strings.Contains(u.Email, "@") },
func(u User) error { return errors.New("invalid email") },
)
// Compose validators
validateUser := F.Flow2(
validateAge,
E.Chain(validateEmail),
)
// Apply to different users
result1 := validateUser(User{Name: "Alice", Email: "alice@example.com", Age: 25})
result2 := validateUser(User{Name: "Bob", Email: "invalid", Age: 30})
```
#### Monadic Operations
```go
import (
O "github.com/IBM/fp-go/v2/option"
)
// Data last enables clean monadic chains
parseAndDouble := F.Flow2(
O.FromPredicate(func(s string) bool { return s != "" }),
O.Chain(func(s string) O.Option[int] {
n, err := strconv.Atoi(s)
if err != nil {
return O.None[int]()
}
return O.Some(n * 2)
}),
)
result1 := parseAndDouble("21") // Some(42)
result2 := parseAndDouble("") // None
result3 := parseAndDouble("abc") // None
```
### Monadic vs Non-Monadic Forms
fp-go provides two forms for most operations:
1. **Curried form** (data last): Returns a function that can be composed
2. **Monadic form** (data first): Takes all parameters at once
```go
// Curried form - data last, returns a function
Map[A, B any](f func(A) B) func(Option[A]) Option[B]
// Monadic form - data first, direct execution
MonadMap[A, B any](fa Option[A], f func(A) B) Option[B]
```
**When to use each:**
- **Curried form**: When building pipelines, composing functions, or creating reusable transformations
- **Monadic form**: When you have all parameters available and want direct execution
```go
// Curried form - building a pipeline
transform := F.Flow3(
O.Map(strings.ToUpper),
O.Filter(func(s string) bool { return len(s) > 3 }),
O.GetOrElse(func() string { return "DEFAULT" }),
)
result := transform(O.Some("hello"))
// Monadic form - direct execution
result := O.MonadMap(O.Some("hello"), strings.ToUpper)
```
### Further Reading on Data-Last Pattern
The data-last currying pattern is well-documented in the functional programming community:
- [Mostly Adequate Guide - Ch. 4: Currying](https://mostly-adequate.gitbook.io/mostly-adequate-guide/ch04) - Excellent introduction with clear examples
- [Curry and Function Composition](https://medium.com/javascript-scene/curry-and-function-composition-2c208d774983) by Eric Elliott
- [fp-ts Issue #1238](https://github.com/gcanti/fp-ts/issues/1238) - Real-world examples of data-last refactoring
## Kleisli and Operator Types
fp-go uses consistent type aliases across all monads to make code more recognizable and composable. These types provide a common vocabulary that works across different monadic contexts.
### Type Definitions
```go
// Kleisli arrow - a function that returns a monadic value
type Kleisli[A, B any] = func(A) M[B]
// Operator - a function that transforms a monadic value
type Operator[A, B any] = func(M[A]) M[B]
```
Where `M` represents the specific monad (Option, Either, IO, etc.).
### Why These Types Matter
1. **Consistency**: The same type names appear across all monads
2. **Recognizability**: Experienced functional programmers immediately understand the intent
3. **Composability**: Functions with these types compose naturally
4. **Documentation**: Type signatures clearly communicate the operation's behavior
### Examples Across Monads
#### Option Monad
```go
// option/option.go
type Kleisli[A, B any] = func(A) Option[B]
type Operator[A, B any] = func(Option[A]) Option[B]
// Chain uses Kleisli
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B]
// Map returns an Operator
func Map[A, B any](f func(A) B) Operator[A, B]
```
#### Either Monad
```go
// either/either.go
type Kleisli[E, A, B any] = func(A) Either[E, B]
type Operator[E, A, B any] = func(Either[E, A]) Either[E, B]
// Chain uses Kleisli
func Chain[E, A, B any](f Kleisli[E, A, B]) Operator[E, A, B]
// Map returns an Operator
func Map[E, A, B any](f func(A) B) Operator[E, A, B]
```
#### IO Monad
```go
// io/io.go
type Kleisli[A, B any] = func(A) IO[B]
type Operator[A, B any] = func(IO[A]) IO[B]
// Chain uses Kleisli
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B]
// Map returns an Operator
func Map[A, B any](f func(A) B) Operator[A, B]
```
#### Array (List Monad)
```go
// array/array.go
type Kleisli[A, B any] = func(A) []B
type Operator[A, B any] = func([]A) []B
// Chain uses Kleisli
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B]
// Map returns an Operator
func Map[A, B any](f func(A) B) Operator[A, B]
```
### Pattern Recognition
Once you learn these patterns in one monad, you can apply them to all monads:
```go
// The pattern is always the same, just the monad changes
// Option
validateAge := option.Chain(func(user User) option.Option[User] {
if user.Age >= 18 {
return option.Some(user)
}
return option.None[User]()
})
// Either
validateAge := either.Chain(func(user User) either.Either[error, User] {
if user.Age >= 18 {
return either.Right[error](user)
}
return either.Left[User](errors.New("too young"))
})
// IO
validateAge := io.Chain(func(user User) io.IO[User] {
return io.Of(user) // Always succeeds in IO
})
// Array
validateAge := array.Chain(func(user User) []User {
if user.Age >= 18 {
return []User{user}
}
return []User{} // Empty array = failure
})
```
### Composing Kleisli Arrows
Kleisli arrows compose naturally using monadic composition:
```go
import (
O "github.com/IBM/fp-go/v2/option"
F "github.com/IBM/fp-go/v2/function"
)
// Define Kleisli arrows
parseAge := func(s string) O.Option[int] {
n, err := strconv.Atoi(s)
if err != nil {
return O.None[int]()
}
return O.Some(n)
}
validateAge := func(age int) O.Option[int] {
if age >= 18 {
return O.Some(age)
}
return O.None[int]()
}
formatAge := func(age int) O.Option[string] {
return O.Some(fmt.Sprintf("Age: %d", age))
}
// Compose them using Flow and Chain
pipeline := F.Flow3(
parseAge,
O.Chain(validateAge),
O.Chain(formatAge),
)
result := pipeline("25") // Some("Age: 25")
result := pipeline("15") // None (too young)
result := pipeline("abc") // None (parse error)
```
### Building Reusable Operators
Operators can be created once and reused across your codebase:
```go
import (
E "github.com/IBM/fp-go/v2/either"
)
// Create reusable operators
type ValidationError struct {
Field string
Message string
}
// Reusable validation operators
validateNonEmpty := E.Chain(func(s string) E.Either[ValidationError, string] {
if s == "" {
return E.Left[string](ValidationError{
Field: "input",
Message: "cannot be empty",
})
}
return E.Right[ValidationError](s)
})
validateEmail := E.Chain(func(s string) E.Either[ValidationError, string] {
if !strings.Contains(s, "@") {
return E.Left[string](ValidationError{
Field: "email",
Message: "invalid format",
})
}
return E.Right[ValidationError](s)
})
// Compose operators
validateEmailInput := F.Flow2(
validateNonEmpty,
validateEmail,
)
// Use across your application
result1 := validateEmailInput(E.Right[ValidationError]("user@example.com"))
result2 := validateEmailInput(E.Right[ValidationError](""))
result3 := validateEmailInput(E.Right[ValidationError]("invalid"))
```
### Benefits of Consistent Naming
1. **Cross-monad understanding**: Learn once, apply everywhere
2. **Easier refactoring**: Changing monads requires minimal code changes
3. **Better tooling**: IDEs can provide better suggestions
4. **Team communication**: Shared vocabulary across the team
5. **Library integration**: Third-party libraries follow the same patterns
### Identity Monad - The Simplest Case
The Identity monad shows these types in their simplest form:
```go
// identity/doc.go
type Operator[A, B any] = func(A) B
// In Identity, there's no wrapping, so:
// - Kleisli[A, B] is just func(A) B
// - Operator[A, B] is just func(A) B
// They're the same because Identity adds no context
```
This demonstrates that these type aliases represent fundamental functional programming concepts, not just arbitrary naming conventions.
## Monadic Operations Comparison
fp-go's monadic operations are inspired by functional programming languages and libraries. Here's how they compare:
| fp-go | fp-ts | Haskell | Scala | Description |
|-------|-------|---------|-------|-------------|
| `Map` | `map` | `fmap` | `map` | Functor mapping - transforms the value inside a context |
| `Chain` | `chain` | `>>=` (bind) | `flatMap` | Monadic bind - chains computations that return wrapped values |
| `Ap` | `ap` | `<*>` | `ap` | Applicative apply - applies a wrapped function to a wrapped value |
| `Of` | `of` | `return`/`pure` | `pure` | Lifts a pure value into a monadic context |
| `Fold` | `fold` | `either` | `fold` | Eliminates the context by providing handlers for each case |
| `Filter` | `filter` | `mfilter` | `filter` | Keeps values that satisfy a predicate |
| `Flatten` | `flatten` | `join` | `flatten` | Removes one level of nesting |
| `ChainFirst` | `chainFirst` | `>>` (then) | `tap` | Chains for side effects, keeping the original value |
| `Alt` | `alt` | `<\|>` | `orElse` | Provides an alternative value if the first fails |
| `GetOrElse` | `getOrElse` | `fromMaybe` | `getOrElse` | Extracts the value or provides a default |
| `FromPredicate` | `fromPredicate` | `guard` | `filter` | Creates a monadic value based on a predicate |
| `Sequence` | `sequence` | `sequence` | `sequence` | Transforms a collection of effects into an effect of a collection |
| `Traverse` | `traverse` | `traverse` | `traverse` | Maps and sequences in one operation |
| `Reduce` | `reduce` | `foldl` | `foldLeft` | Folds a structure from left to right |
| `ReduceRight` | `reduceRight` | `foldr` | `foldRight` | Folds a structure from right to left |
### Key Differences from Other Languages
#### Naming Conventions
- **Go conventions**: fp-go uses PascalCase for exported functions (e.g., `Map`, `Chain`) following Go's naming conventions
- **Type parameters first**: Non-inferrable type parameters come first (e.g., `Ap[B, E, A any]`)
- **Monadic prefix**: Direct execution forms use the `Monad` prefix (e.g., `MonadMap`, `MonadChain`)
#### Type System
```go
// fp-go (explicit type parameters when needed)
result := option.Map(transform)(value)
result := option.Map[string, int](transform)(value) // explicit when inference fails
// Haskell (type inference)
result = fmap transform value
// Scala (type inference with method syntax)
result = value.map(transform)
// fp-ts (TypeScript type inference)
const result = pipe(value, map(transform))
```
#### Currying
```go
// fp-go - explicit currying with data last
double := array.Map(number.Mul(2))
result := double(numbers)
// Haskell - automatic currying
double = fmap (*2)
result = double numbers
// Scala - method syntax
result = numbers.map(_ * 2)
```
## Type Parameter Ordering
fp-go v2 uses a specific ordering for type parameters to maximize type inference:
### Rule: Non-Inferrable Parameters First
Type parameters that **cannot be inferred** from function arguments come first. This allows the Go compiler to infer as many types as possible.
```go
// Ap - B cannot be inferred from arguments, so it comes first
func Ap[B, E, A any](fa Either[E, A]) func(Either[E, func(A) B]) Either[E, B]
// Usage - only B needs to be specified
result := either.Ap[string](value)(funcInEither)
```
### Examples
```go
// Map - all types can be inferred from arguments
func Map[E, A, B any](f func(A) B) func(Either[E, A]) Either[E, B]
// Usage - no type parameters needed
result := either.Map(transform)(value)
// Chain - all types can be inferred
func Chain[E, A, B any](f func(A) Either[E, B]) func(Either[E, A]) Either[E, B]
// Usage - no type parameters needed
result := either.Chain(validator)(value)
// Of - E cannot be inferred, comes first
func Of[E, A any](value A) Either[E, A]
// Usage - only E needs to be specified
result := either.Of[error](42)
```
### Benefits
1. **Less verbose code**: Most operations don't require explicit type parameters
2. **Better IDE support**: Type inference provides better autocomplete
3. **Clearer intent**: Only specify types that can't be inferred
## Generic Type Aliases
fp-go v2 leverages Go 1.24's generic type aliases for cleaner type definitions:
```go
// V2 - using generic type alias (requires Go 1.24+)
type ReaderIOEither[R, E, A any] = RD.Reader[R, IOE.IOEither[E, A]]
// V1 - using type definition (Go 1.18+)
type ReaderIOEither[R, E, A any] RD.Reader[R, IOE.IOEither[E, A]]
```
### Benefits
1. **True aliases**: The type is interchangeable with its definition
2. **No namespace imports needed**: Can use types directly without package prefixes
3. **Simpler codebase**: Eliminates the need for `generic` subpackages
4. **Better composability**: Types compose more naturally
### Migration Pattern
```go
// Define project-wide aliases once
package types
import (
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/result"
"github.com/IBM/fp-go/v2/ioresult"
)
type Option[A any] = option.Option[A]
type Result[A any] = result.Result[A]
type IOResult[A any] = ioresult.IOResult[A]
// Use throughout your codebase
package myapp
import "myproject/types"
func process(input string) types.Result[types.Option[int]] {
// implementation
}
```
---
For more information, see:
- [README.md](./README.md) - Overview and quick start
- [API Documentation](https://pkg.go.dev/github.com/IBM/fp-go/v2) - Complete API reference
- [Samples](./samples/) - Practical examples

View File

@@ -0,0 +1,212 @@
# Example Tests Progress
This document tracks the progress of converting documentation examples into executable example test files.
## Overview
The codebase has 300+ documentation examples across many packages. This document tracks which packages have been completed and which still need work.
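For reference, the example tests tracked here follow Go's standard testable-example convention: an `ExampleXxx` function whose printed output is checked by `go test` against its `// Output:` comment. A minimal, package-agnostic sketch (the package and function names are illustrative):
```go
package mypackage_test

import "fmt"

// ExampleGreet is an illustrative testable example: go test runs it and
// compares the printed output against the // Output: comment below.
func ExampleGreet() {
	fmt.Println("hello", 42)
	// Output: hello 42
}
```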
## Completed Packages
### Core Packages
- [x] **result** - Created `examples_bind_test.go`, `examples_curry_test.go`, `examples_apply_test.go`
- Files: `bind.go` (10 examples), `curry.go` (5 examples), `apply.go` (2 examples)
- Status: ✅ 17 tests passing
### Utility Packages
- [x] **pair** - Created `examples_test.go`
- Files: `pair.go` (14 examples)
- Status: ✅ 14 tests passing
- [x] **tuple** - Created `examples_test.go`
- Files: `tuple.go` (6 examples)
- Status: ✅ 6 tests passing
### Type Class Packages
- [x] **semigroup** - Created `examples_test.go`
- Files: `semigroup.go` (7 examples)
- Status: ✅ 7 tests passing
### Utility Packages (continued)
- [x] **predicate** - Created `examples_test.go`
- Files: `bool.go` (3 examples), `contramap.go` (1 example)
- Status: ✅ 4 tests passing
### Context Reader Packages
- [x] **idiomatic/context/readerresult** - Created `examples_reader_test.go`, `examples_bind_test.go`
- Files: `reader.go` (8 examples), `bind.go` (14 examples)
- Status: ✅ 22 tests passing
## Summary Statistics
- **Total Example Tests Created**: 74
- **Total Packages Completed**: 6 (result, pair, tuple, semigroup, predicate, idiomatic/context/readerresult)
- **All Tests Status**: ✅ PASSING
### Breakdown by Package
- **result**: 21 tests (bind: 10, curry: 5, apply: 2, array: 4)
- **pair**: 14 tests
- **tuple**: 6 tests
- **semigroup**: 7 tests
- **predicate**: 4 tests
- **idiomatic/context/readerresult**: 22 tests (reader: 8, bind: 14)
## Packages with Existing Examples
These packages already have some example test files:
- result (has `examples_create_test.go`, `examples_extract_test.go`)
- option (has `examples_create_test.go`, `examples_extract_test.go`)
- either (has `examples_create_test.go`, `examples_extract_test.go`)
- ioeither (has `examples_create_test.go`, `examples_do_test.go`, `examples_extract_test.go`)
- ioresult (has `examples_create_test.go`, `examples_do_test.go`, `examples_extract_test.go`)
- lazy (has `example_lazy_test.go`)
- array (has `examples_basic_test.go`, `examples_sort_test.go`, `example_any_test.go`, `example_find_test.go`)
- readerioeither (has `traverse_example_test.go`)
- context/readerioresult (has `flip_example_test.go`)
## Packages Needing Example Tests
### Core Packages (High Priority)
- [ ] **result** - Additional files need examples:
- `apply.go` (2 examples)
- `array.go` (7 examples)
- `core.go` (6 examples)
- `either.go` (26 examples)
- `eq.go` (2 examples)
- `functor.go` (1 example)
- [ ] **option** - Additional files need examples
- [ ] **either** - Additional files need examples
### Reader Packages (High Priority)
- [ ] **reader** - Many examples in:
- `array.go` (12 examples)
- `bind.go` (10 examples)
- `curry.go` (8 examples)
- `flip.go` (2 examples)
- `reader.go` (21 examples)
- [ ] **readeroption** - Examples in:
- `array.go` (3 examples)
- `bind.go` (7 examples)
- `curry.go` (5 examples)
- `flip.go` (2 examples)
- `from.go` (4 examples)
- `reader.go` (18 examples)
- `sequence.go` (4 examples)
- [ ] **readerresult** - Examples in:
- `array.go` (3 examples)
- `bind.go` (24 examples)
- `curry.go` (7 examples)
- `flip.go` (2 examples)
- `from.go` (4 examples)
- `monoid.go` (3 examples)
- [ ] **readereither** - Examples in:
- `array.go` (3 examples)
- `bind.go` (7 examples)
- `flip.go` (3 examples)
- [ ] **readerio** - Examples in:
- `array.go` (3 examples)
- `bind.go` (7 examples)
- `flip.go` (2 examples)
- `logging.go` (4 examples)
- `reader.go` (30 examples)
- [ ] **readerioeither** - Examples in:
- `bind.go` (7 examples)
- `flip.go` (1 example)
- [ ] **readerioresult** - Examples in:
- `array.go` (8 examples)
- `bind.go` (24 examples)
### State Packages
- [ ] **statereaderioeither** - Examples in:
- `bind.go` (5 examples)
- `resource.go` (1 example)
- `state.go` (13 examples)
### Utility Packages
- [ ] **lazy** - Additional examples in:
- `apply.go` (2 examples)
- `bind.go` (7 examples)
- `lazy.go` (10 examples)
- `sequence.go` (4 examples)
- `traverse.go` (2 examples)
- [ ] **pair** - Additional examples in:
- `monad.go` (12 examples)
- `pair.go` (remaining ~20 examples)
- [ ] **tuple** - Examples in:
- `tuple.go` (6 examples)
- [ ] **predicate** - Examples in:
- `bool.go` (3 examples)
- `contramap.go` (1 example)
- `monoid.go` (4 examples)
- [ ] **retry** - Examples in:
- `retry.go` (7 examples)
- [ ] **logging** - Examples in:
- `logger.go` (5 examples)
### Collection Packages
- [ ] **record** - Examples in:
- `bind.go` (3 examples)
### Type Class Packages
- [ ] **semigroup** - Examples in:
- `alt.go` (1 example)
- `apply.go` (1 example)
- `array.go` (4 examples)
- `semigroup.go` (7 examples)
- [ ] **ord** - Examples in:
- `ord.go` (1 example)
## Strategy for Completion
1. **Prioritize by usage**: Focus on core packages (result, option, either) first
2. **Group by package**: Complete all examples for one package before moving to next
3. **Test incrementally**: Run tests after each file to catch errors early
4. **Follow patterns**: Use existing example test files as templates
5. **Document as you go**: Update this file with progress
## Example Test File Template
```go
// Copyright header...
package packagename_test
import (
"fmt"
PKG "github.com/IBM/fp-go/v2/packagename"
)
func ExampleFunctionName() {
// Copy example from doc comment
// Ensure it compiles and produces correct output
fmt.Println(result)
// Output:
// expected output
}
```
## Notes
- Use `F.Constant1[error](defaultValue)` for GetOrElse in result package
- Use `F.Pipe1` instead of `F.Pipe2` when only one transformation
- Check function signatures carefully for type parameters
- Some functions like `BiMap` are capitalized differently than in docs
- **Prefer `R.Eitherize1(func)` over manual error handling** - converts `func(T) (R, error)` to `func(T) Result[R]`
  - Example: Use `R.Eitherize1(strconv.Atoi)` instead of manual if/else error checking (see the sketch after this list)
- **Add Go documentation comments to all example functions** - Each example should have a comment explaining what it demonstrates
- **Idiomatic vs Non-Idiomatic packages**:
- Non-idiomatic (e.g., `result`): Uses `Result[A]` type (Either monad)
- Idiomatic (e.g., `idiomatic/result`): Uses `(A, error)` tuples (Go-style)
- Context readers use non-idiomatic `Result[A]` internally
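The `Eitherize1` and `GetOrElse` notes above combine into a small, self-contained example. This is a sketch assuming the v2 `result` and `function` packages, not a file from the repository:
```go
package main

import (
	"fmt"
	"strconv"

	F "github.com/IBM/fp-go/v2/function"
	R "github.com/IBM/fp-go/v2/result"
)

func main() {
	// Eitherize1 turns func(string) (int, error) into func(string) Result[int].
	parse := R.Eitherize1(strconv.Atoi)

	// GetOrElse with F.Constant1[error](0) falls back to 0 on parse errors.
	value := F.Pipe2(
		"42",
		parse,
		R.GetOrElse(F.Constant1[error](0)),
	)
	fmt.Println(value) // 42
}
```
The same pattern applies to any `func(T) (R, error)` from the standard library.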

v2/IDIOMATIC_COMPARISON.md

@@ -0,0 +1,816 @@
# Idiomatic vs Standard Package Comparison
> **Latest Update:** 2025-11-18 - Updated with fresh benchmarks after `either` package optimizations
This document provides a comprehensive comparison between the `idiomatic` packages and the standard fp-go packages (`result` and `option`).
**See also:** [BENCHMARK_COMPARISON.md](./BENCHMARK_COMPARISON.md) for detailed performance analysis.
## Table of Contents
1. [Overview](#overview)
2. [Design Differences](#design-differences)
3. [Performance Comparison](#performance-comparison)
4. [API Comparison](#api-comparison)
5. [When to Use Each](#when-to-use-each)
## Overview
The fp-go library provides two approaches to functional programming patterns in Go:
- **Standard Packages** (`result`, `either`, `option`): Use struct wrappers for algebraic data types
- **Idiomatic Packages** (`idiomatic/result`, `idiomatic/option`): Use native Go tuples for the same patterns
### Key Insight
After recent optimizations to the `either` package, both approaches now offer excellent performance:
- **Simple operations** (~1-5 ns/op): Both packages perform comparably
- **Core transformations**: Idiomatic is **1.2-2.3x faster**
- **Complex operations**: Idiomatic is **2-32x faster** with significantly fewer allocations
- **Real-world pipelines**: Idiomatic shows **2-3.4x speedup**
The idiomatic packages provide:
- Consistently better performance across most operations
- Zero allocations for complex operations (ChainFirst: 72 B → 0 B)
- More familiar Go idioms
- Seamless integration with existing Go code
## Design Differences
### Data Representation
#### Standard Result Package
```go
// Uses Either[error, A] which is a struct wrapper
type Result[A any] = Either[error, A]
type Either[E, A any] struct {
r A
l E
isLeft bool
}
// Creating values - ZERO heap allocations (struct returned by value)
success := result.Right[error](42) // Returns Either struct by value (0 B/op)
failure := result.Left[int](err) // Returns Either struct by value (0 B/op)
// Benchmarks confirm:
// BenchmarkRight-16 871258489 1.384 ns/op 0 B/op 0 allocs/op
// BenchmarkLeft-16 683089270 1.761 ns/op 0 B/op 0 allocs/op
```
#### Idiomatic Result Package
```go
// Uses native Go tuples (value, error)
type Kleisli[A, B any] = func(A) (B, error)
type Operator[A, B any] = func(A, error) (B, error)
// Creating values - ZERO allocations (tuples on stack)
success := result.Right(42) // Returns (42, nil) - 0 B/op
failure := result.Left[int](err) // Returns (0, err) - 0 B/op
// Benchmarks confirm:
// BenchmarkRight-16 789879016 1.427 ns/op 0 B/op 0 allocs/op
// BenchmarkLeft-16 895412131 1.349 ns/op 0 B/op 0 allocs/op
```
### Type Signatures
#### Standard Result
```go
// Functions take and return Result[T] structs
func Map[A, B any](f func(A) B) func(Result[A]) Result[B]
func Chain[A, B any](f Kleisli[A, B]) func(Result[A]) Result[B]
func Fold[A, B any](onLeft func(error) B, onRight func(A) B) func(Result[A]) B
// Usage requires wrapping/unwrapping
res := result.Right[error](42)
mapped := result.Map(double)(res)
value, err := result.UnwrapError(mapped)
```
#### Idiomatic Result
```go
// Functions work directly with tuples
func Map[A, B any](f func(A) B) func(A, error) (B, error)
func Chain[A, B any](f Kleisli[A, B]) func(A, error) (B, error)
func Fold[A, B any](onLeft func(error) B, onRight func(A) B) func(A, error) B
// Usage works naturally with Go's error handling
value, err := result.Right(42)
value, err = result.Map(double)(value, err)
// Can use directly: if err != nil { ... }
```
### Memory Layout
#### Standard Result (struct-based)
```
Either[error, int] struct (returned by value):
┌─────────────────────┐
│ r: int (8B) │ Stack allocation: 24 bytes
│ l: error (8B) │ NO heap allocation when returned by value
│ isLeft: bool (1B) │ Benchmarks show 0 B/op, 0 allocs/op
│ padding (7B) │
└─────────────────────┘
Key insight: Go returns small structs (<= ~64 bytes) by value on the stack.
The Either struct (24 bytes) does NOT escape to heap in normal usage.
```
#### Idiomatic Result (tuple-based)
```
(int, error) tuple:
┌─────────────────────┐
│ int: 8 bytes │ Stack allocation: 16 bytes
│ error: 8 bytes │ NO heap allocation
└─────────────────────┘
Both approaches achieve zero heap allocations for constructor operations!
```
### Why Both Have Zero Allocations
Both packages avoid heap allocations for simple operations:
**Standard Either/Result:**
- `Either` struct is small (24 bytes)
- Go returns by value on the stack
- Inlining eliminates function call overhead
- Result: `0 B/op, 0 allocs/op`
**Idiomatic Result:**
- Tuples are native Go multi-value returns
- Always on stack, never heap
- Even simpler than structs
- Result: `0 B/op, 0 allocs/op`
**When Either WOULD escape to heap:**
```go
// Taking address of local Either
func bad1() *Either[error, int] {
e := Right[error](42)
return &e // ESCAPES: pointer to local
}
// Storing in interface
func bad2() interface{} {
return Right[error](42) // ESCAPES: interface boxing
}
// Closure capturing a local value
func bad3() func() Either[error, int] {
e := Right[error](42)
return func() Either[error, int] {
return e // May escape depending on usage
}
}
```
In normal functional composition (Map, Chain, Fold), neither package causes heap allocations for simple operations.
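As a sanity check, the allocation claims above can be reproduced with a standard Go benchmark; this is a sketch (the benchmark name is illustrative), run with `go test -bench . -benchmem`:
```go
package result_test

import (
	"testing"

	"github.com/IBM/fp-go/v2/result"
)

// BenchmarkRightSketch mirrors the constructor benchmarks quoted above;
// -benchmem reports the B/op and allocs/op columns.
func BenchmarkRightSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = result.Right[error](i)
	}
}
```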
## Performance Comparison
> **Latest benchmarks:** 2025-11-18 after `either` package optimizations
>
> For detailed analysis, see [BENCHMARK_COMPARISON.md](./BENCHMARK_COMPARISON.md)
### Quick Summary (Either vs Idiomatic)
Both packages now show **excellent performance** after optimizations:
| Category | Either | Idiomatic | Winner | Speedup |
|----------|--------|-----------|--------|---------|
| **Constructors** | 1.4-1.8 ns/op | 1.2-1.4 ns/op | **TIE** | ~1.0-1.3x |
| **Predicates** | 1.5 ns/op | 1.3-1.5 ns/op | **TIE** | ~1.0x |
| **Map Operations** | 4.2-7.2 ns/op | 2.5-4.3 ns/op | **Idiomatic** | 1.2-2.1x |
| **Chain Operations** | 4.4-5.4 ns/op | 2.3-2.5 ns/op | **Idiomatic** | 1.8-2.3x |
| **ChainFirst** | **87.6 ns/op** (72 B) | **2.7 ns/op** (0 B) | **Idiomatic** | **32.4x** ✓✓✓ |
| **BiMap** | 11.5-16.8 ns/op | 3.5-3.8 ns/op | **Idiomatic** | 3.3-4.4x |
| **Alt/OrElse** | 4.0-5.7 ns/op | 2.4 ns/op | **Idiomatic** | 1.6-2.4x |
| **GetOrElse** | 6.3-9.0 ns/op | 1.5-2.1 ns/op | **Idiomatic** | 3.1-6.1x |
| **Pipelines** | 75-280 ns/op | 26-116 ns/op | **Idiomatic** | 2.4-3.4x |
### Constructor Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Winner |
|-----------|----------------|-------------------|---------|--------|
| Left | 1.76 | **1.35** | 1.3x | Idiomatic ✓ |
| Right | 1.38 | 1.43 | ~1.0x | Tie |
| Of | 1.68 | **1.22** | 1.4x | Idiomatic ✓ |
**Analysis:** After optimizations, both packages have comparable constructor performance.
### Core Transformation Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Winner |
|------------------|----------------|-------------------|---------|--------|
| Map (Right) | 5.13 | **4.34** | 1.2x | Idiomatic ✓ |
| Map (Left) | 4.19 | **2.48** | 1.7x | Idiomatic ✓ |
| MapLeft (Right) | 3.93 | **2.22** | 1.8x | Idiomatic ✓ |
| MapLeft (Left) | 7.22 | **3.51** | 2.1x | Idiomatic ✓ |
| Chain (Right) | 5.44 | **2.34** | 2.3x | Idiomatic ✓ |
| Chain (Left) | 4.44 | **2.53** | 1.8x | Idiomatic ✓ |
### Complex Operations - The Big Difference
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------------------|----------------|-------------------|---------|---------------|-------------|
| **ChainFirst (Right)** | **87.62** | **2.71** | **32.4x** ✓✓✓ | 72 B, 3 allocs | **0 B, 0 allocs** |
| ChainFirst (Left) | 3.94 | 2.48 | 1.6x | 0 B | 0 B |
| BiMap (Right) | 16.79 | **3.82** | 4.4x | 0 B | 0 B |
| BiMap (Left) | 11.47 | **3.47** | 3.3x | 0 B | 0 B |
**Critical Insight:** ChainFirst shows the most dramatic difference - **32x faster** with **zero allocations** in idiomatic.
### Pipeline Benchmarks (Real-World Scenarios)
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Either Allocs | Idio Allocs |
|-----------|----------------|-------------------|---------|---------------|-------------|
| Pipeline Map (Right) | 112.7 | **46.5** | **2.4x** ✓ | 72 B, 3 allocs | 48 B, 2 allocs |
| Pipeline Chain (Right) | 74.4 | **26.1** | **2.9x** ✓ | 48 B, 2 allocs | 24 B, 1 alloc |
| Pipeline Complex (Right)| 279.8 | **116.3** | **2.4x** ✓ | 192 B, 8 allocs | 120 B, 5 allocs |
**Analysis:** In realistic composition scenarios, idiomatic is consistently 2-3x faster with fewer allocations.
### Extraction Operations
| Operation | Either (ns/op) | Idiomatic (ns/op) | Speedup | Winner |
|-----------|----------------|-------------------|---------|--------|
| GetOrElse (Right) | 9.01 | **1.49** | **6.1x** ✓✓ | Idiomatic |
| GetOrElse (Left) | 6.35 | **2.08** | **3.1x** ✓✓ | Idiomatic |
| Alt (Right) | 5.72 | **2.40** | **2.4x** ✓ | Idiomatic |
| Alt (Left) | 4.89 | **2.39** | **2.0x** ✓ | Idiomatic |
| Fold (Right) | 4.03 | **2.75** | **1.5x** ✓ | Idiomatic |
| Fold (Left) | 3.69 | **2.40** | **1.5x** ✓ | Idiomatic |
**Analysis:** Idiomatic shows significant advantages (1.5-6x) for value extraction operations.
### Key Findings After Optimizations
1. **Both packages are now fast** - Simple operations are in the 1-5 ns/op range for both
2. **Idiomatic leads in most operations** - 1.2-2.3x faster for common transformations
3. **ChainFirst is the standout** - 32x faster with zero allocations in idiomatic
4. **Pipelines favor idiomatic** - 2-3.4x faster in realistic composition scenarios
5. **Memory efficiency** - Idiomatic consistently uses fewer allocations
### Performance Summary
**Idiomatic Advantages:**
- **Core operations**: 1.2-2.3x faster for Map, Chain, Fold
- **Complex operations**: 3-32x faster with zero allocations
- **Pipelines**: 2-3.4x faster with significantly fewer allocations
- **Extraction**: 1.5-6x faster for GetOrElse, Alt, Fold
- **Consistency**: Predictable, fast performance across all operations
**Either Advantages:**
- **Comparable performance**: After optimizations, matches idiomatic for simple operations
- **Feature richness**: More operations (Do-notation, Bind, Let, Flatten, Swap)
- **Type flexibility**: Full Either[E, A] with custom error types
- **Zero allocations**: Most simple operations have zero allocations
## API Comparison
### Creating Values
#### Standard Result
```go
import "github.com/IBM/fp-go/v2/result"
// Create success/failure
success := result.Right[error](42)
failure := result.Left[int](errors.New("oops"))
// Type annotation required
var r result.Result[int] = result.Right[error](42)
```
#### Idiomatic Result
```go
import "github.com/IBM/fp-go/v2/idiomatic/result"
// Create success/failure (more concise)
success := result.Right(42) // (42, nil)
failure := result.Left[int](errors.New("oops")) // (0, error)
// Native Go pattern
value, err := result.Right(42)
if err != nil {
// handle error
}
```
### Transforming Values
#### Standard Result
```go
// Map transforms the success value
double := result.Map(N.Mul(2))
doubled := double(result.Right[error](21)) // Right(42)
// Chain sequences operations
validate := result.Chain(func(x int) result.Result[int] {
if x > 0 {
return result.Right[error](x * 2)
}
return result.Left[int](errors.New("negative"))
})
```
#### Idiomatic Result
```go
// Map transforms the success value
double := result.Map(N.Mul(2))
value, err := double(21, nil) // (42, nil)
// Chain sequences operations
validate := result.Chain(func(x int) (int, error) {
if x > 0 {
return x * 2, nil
}
return 0, errors.New("negative")
})
```
### Pattern Matching
#### Standard Result
```go
// Fold extracts the value
output := result.Fold(
func(err error) string { return "Error: " + err.Error() },
func(n int) string { return fmt.Sprintf("Value: %d", n) },
)(myResult)
// GetOrElse with default
value := result.GetOrElse(func(err error) int { return 0 })(myResult)
```
#### Idiomatic Result
```go
// Fold extracts the value (same API, different input)
output := result.Fold(
func(err error) string { return "Error: " + err.Error() },
func(n int) string { return fmt.Sprintf("Value: %d", n) },
)(value, err)
// GetOrElse with default
value := result.GetOrElse(func(err error) int { return 0 })(value, err)
// Or use native Go pattern
if err != nil {
value = 0
}
```
### Integration with Existing Code
#### Standard Result
```go
// Converting from (value, error) to Result
func doSomething() (int, error) {
return 42, nil
}
res := result.TryCatchError(doSomething())
// Converting back to (value, error)
value, err := result.UnwrapError(res)
```
#### Idiomatic Result
```go
// Direct compatibility with (value, error)
func doSomething() (int, error) {
return 42, nil
}
// No conversion needed!
value, err := doSomething()
value, err = result.Map(double)(value, err)
```
### Pipeline Composition
#### Standard Result
```go
import F "github.com/IBM/fp-go/v2/function"
output := F.Pipe3(
result.Right[error](10),
result.Map(double),
result.Chain(validate),
result.Map(format),
)
// Need to unwrap at the end
value, err := result.UnwrapError(output)
```
#### Idiomatic Result
```go
// (value, error) pairs cannot be passed through F.Pipe directly,
// so the idiomatic style threads the tuple explicitly
value, err := result.Right(10)
value, err = result.Map(double)(value, err)
value, err = result.Chain(validate)(value, err)
output, err := result.Map(format)(value, err)
// Already in (value, error) form
if err != nil {
// handle error
}
```
## Detailed Design Comparison
### Type System
#### Standard Result
**Strengths:**
- Full algebraic data type semantics
- Explicit Either[E, A] allows custom error types
- Type-safe by construction
- Clear separation of error and success channels
**Weaknesses:**
- Requires wrapper structs (memory overhead)
- Less familiar to Go developers
- Needs conversion functions for Go's standard library
- More verbose type annotations
#### Idiomatic Result
**Strengths:**
- Native Go idioms (value, error) pattern
- Zero wrapper overhead
- Seamless stdlib integration
- Familiar to all Go developers
- Terser syntax
**Weaknesses:**
- Error type fixed to `error`
- Less explicit about Either semantics
- Cannot use custom error types without conversion
- Slightly less type-safe (can accidentally ignore bool/error)
### Monad Laws
Both packages satisfy the monad laws, but enforce them differently:
#### Standard Result
```go
// Left identity: return a >>= f ≡ f a
assert.Equal(
result.Chain(f)(result.Of(a)),
f(a),
)
// Right identity: m >>= return ≡ m
assert.Equal(
result.Chain(result.Of[int])(m),
m,
)
// Associativity: (m >>= f) >>= g ≡ m >>= (\x -> f x >>= g)
assert.Equal(
result.Chain(g)(result.Chain(f)(m)),
result.Chain(func(x int) result.Result[int] {
return result.Chain(g)(f(x))
})(m),
)
```
#### Idiomatic Result
```go
// Same laws, different syntax
// Left identity
a, aerr := result.Of(val)
b, berr := result.Chain(f)(a, aerr)
c, cerr := f(val)
assert.Equal(c, b)
assert.Equal(cerr, berr)
// Right identity
value, err := m()
idValue, idErr := result.Chain(result.Of[int])(value, err)
assert.Equal(value, idValue)
assert.Equal(err, idErr)
// Associativity (same structure, tuple-based)
```
### Error Handling Philosophy
#### Standard Result
```go
// Explicit error handling through types
func processUser(id int) result.Result[User] {
user := fetchUser(id) // Returns Result[User]
return F.Pipe2(
user,
result.Chain(validateUser),
result.Chain(enrichUser),
)
}
// Must explicitly unwrap
user, err := result.UnwrapError(processUser(42))
if err != nil {
log.Error(err)
}
```
#### Idiomatic Result
```go
// Natural Go error handling
func processUser(id int) (User, error) {
user, err := fetchUser(id) // Returns (User, error)
user, err = result.Chain(validateUser)(user, err)
return result.Chain(enrichUser)(user, err)
}
// Already in Go form
user, err := processUser(42)
if err != nil {
log.Error(err)
}
```
### Composition Patterns
#### Standard Result
```go
// Applicative composition
import A "github.com/IBM/fp-go/v2/apply"
type Config struct {
Host string
Port int
DB string
}
config := A.SequenceT3(
result.FromPredicate(validHost, hostError)(host),
result.FromPredicate(validPort, portError)(port),
result.FromPredicate(validDB, dbError)(db),
)(func(h string, p int, d string) Config {
return Config{h, p, d}
})
```
#### Idiomatic Result
```go
// Direct tuple composition
config, err := func() (Config, error) {
host, err := result.FromPredicate(validHost, hostError)(host)
if err != nil {
return Config{}, err
}
port, err := result.FromPredicate(validPort, portError)(port)
if err != nil {
return Config{}, err
}
db, err := result.FromPredicate(validDB, dbError)(db)
if err != nil {
return Config{}, err
}
return Config{host, port, db}, nil
}()
```
## When to Use Each
### Use Idiomatic Result When (Recommended for Most Cases):
1. **Performance Matters**
- Any production service (web servers, APIs, microservices)
- Hot paths and high-throughput scenarios (>1000 req/s)
- Complex operation chains (**32x faster** ChainFirst)
- Real-world pipelines (**2-3x faster**)
- Memory-constrained environments (zero allocations)
- Want **1.2-6x speedup** across most operations
2. **Go Integration** ⭐⭐
- Working with existing Go codebases
- Interfacing with standard library (native (value, error))
- Team familiar with Go, new to FP
- Want zero-cost functional abstractions
- Seamless error handling patterns
3. **Pragmatic Functional Programming**
- Value performance AND functional patterns
- Prefer Go idioms over FP terminology
- Simpler function signatures
- Lower cognitive overhead
- Production-ready patterns
4. **Real-World Applications**
- Web servers, REST APIs, gRPC services
- CLI tools and command-line applications
- Data processing pipelines
- Any latency-sensitive application
- Systems with tight performance budgets
**Performance Gains:** Use idiomatic for 1.2-32x speedup depending on operation, with consistently lower allocations.
### Use Standard Either/Result When:
1. **Type Safety & Flexibility**
- Need explicit Either[E, A] with **custom error types**
- Building domain-specific error hierarchies
- Want to distinguish different error categories at type level
- Type system enforcement is critical
2. **Advanced FP Features**
- Using Do-notation for complex monadic compositions
- Need operations like Flatten, Swap, Bind, Let
- Leveraging advanced type classes (Semigroup, Monoid)
- Want the complete FP toolkit
3. **FP Expertise & Education**
- Porting code from other FP languages (Scala, Haskell)
- Teaching functional programming concepts
- Team has strong FP background
- Explicit algebraic data types preferred
- Code review benefits from FP terminology
4. **Performance is Acceptable**
- After optimizations, Either is **quite fast** (1-5 ns/op for simple operations)
- Difference matters mainly at high scale (millions of operations)
- Code clarity > micro-optimizations
- Simple operations dominate your workload
**Note:** Either package is now performant enough for most use cases. Choose it for features, not performance concerns.
### Hybrid Approach
You can use both packages together:
```go
import (
stdResult "github.com/IBM/fp-go/v2/result"
"github.com/IBM/fp-go/v2/idiomatic/result"
)
// Use standard for complex types
type ValidationError struct {
Field string
Error string
}
func validateInput(input string) stdResult.Either[ValidationError, Input] {
// ... validation logic
}
// Convert to idiomatic for performance
func processInput(input string) (Output, error) {
validated := validateInput(input)
value, err := stdResult.UnwrapError(
stdResult.MapLeft(toError)(validated),
)
// Use idiomatic for hot path
return result.Chain(heavyProcessing)(value, err)
}
```
## Migration Guide
### From Standard to Idiomatic
```go
// Before (standard)
import "github.com/IBM/fp-go/v2/result"
func process(x int) result.Result[int] {
return F.Pipe2(
result.Right[error](x),
result.Map(double),
result.Chain(validate),
)
}
// After (idiomatic)
import "github.com/IBM/fp-go/v2/idiomatic/result"
func process(x int) (int, error) {
return F.Pipe2(
result.Right(x),
result.Map(double),
result.Chain(validate),
)
}
```
### Key Changes
1. **Type signatures**: `Result[T]` → `(T, error)`
2. **Kleisli**: `func(A) Result[B]` → `func(A) (B, error)`
3. **Operator**: `func(Result[A]) Result[B]` → `func(A, error) (B, error)` (see the sketch after this list)
4. **Return values**: Function calls return tuples, not wrapped values
5. **Pattern matching**: Same Fold/GetOrElse API, different inputs
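As a concrete illustration of changes 2 and 3, here is a sketch of the same `validate` Kleisli arrow used in the examples earlier in this document, written once per style (the import aliases are illustrative):
```go
package migration

import (
	"errors"

	iresult "github.com/IBM/fp-go/v2/idiomatic/result"
	sresult "github.com/IBM/fp-go/v2/result"
)

// Standard: the Kleisli arrow returns a wrapped Result.
func validateStd(x int) sresult.Result[int] {
	if x > 0 {
		return sresult.Right[error](x)
	}
	return sresult.Left[int](errors.New("negative"))
}

// Idiomatic: the same Kleisli arrow is an ordinary Go function, which is
// exactly what iresult.Kleisli[int, int] denotes.
func validateIdiomatic(x int) (int, error) {
	if x > 0 {
		return x, nil
	}
	return 0, errors.New("negative")
}

// The alias makes the relationship explicit.
var _ iresult.Kleisli[int, int] = validateIdiomatic
```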
## Conclusion
### Performance Summary (After Either Optimizations)
The latest benchmark results show a clear pattern:
**Both packages are now fast**, but idiomatic consistently leads:
- **Constructors & Predicates**: Both ~1-2 ns/op (essentially tied)
- **Core transformations**: Idiomatic **1.2-2.3x faster** (Map, Chain, Fold)
- **Complex operations**: Idiomatic **3-32x faster** (BiMap, ChainFirst)
- **Pipelines**: Idiomatic **2-3.4x faster** with fewer allocations
- **Extraction**: Idiomatic **1.5-6x faster** (GetOrElse, Alt)
**Key Insight:** The idiomatic package delivers **consistently better performance** across the board while maintaining zero-cost abstractions. The Either package is now fast enough for most use cases, but idiomatic is the performance winner.
### Updated Recommendation Matrix
| Scenario | Recommendation | Reason |
|----------|---------------|--------|
| **New Go project** | **Idiomatic** ⭐ | Natural Go patterns, 1.2-6x faster, better integration |
| **Production services** | **Idiomatic** ⭐⭐ | 2-3x faster pipelines, zero allocations, proven performance |
| **Performance critical** | **Idiomatic** ⭐⭐⭐ | 32x faster complex ops, minimal allocations |
| **Microservices/APIs** | **Idiomatic** ⭐⭐ | High throughput, familiar patterns, better performance |
| **CLI Tools** | **Idiomatic** ⭐ | Low overhead, Go idioms, fast |
| Custom error types | Standard/Either | Need Either[E, A] with domain types |
| Learning FP | Standard/Either | Clearer ADT semantics, educational |
| FP-heavy codebase | Standard/Either | Consistency, Do-notation, full FP toolkit |
| Library/Framework | Either way | Both are good; choose based on API style |
### Real-World Impact
For a service handling 10,000 requests/second with typical pipeline operations:
```
10,000 req/s ≈ 864M requests/day
Either package:    280 ns/op × 864M ops/day ≈ 242 seconds of CPU time per day
Idiomatic package: 116 ns/op × 864M ops/day ≈ 100 seconds of CPU time per day
Time saved: roughly 142 seconds (≈ 2.4 minutes) of CPU time per day
```
At scale, this translates to:
- Lower latency (2-3x faster response times for FP operations)
- Reduced CPU usage (fewer cores needed)
- Lower memory pressure (significantly fewer allocations)
- Better resource utilization
### Final Recommendation
**For most Go projects:** Use **idiomatic packages**
- 1.2-32x faster across operations
- Native Go idioms
- Zero-cost abstractions
- Production-proven performance
- Easier integration
**For specialized needs:** Use **standard Either/Result**
- Need custom error types Either[E, A]
- Want Do-notation and advanced FP features
- Porting from FP languages
- Educational/learning context
- FP-heavy existing codebase
### Bottom Line
After optimizations, both packages are excellent:
- **Either/Result**: Fast enough for most use cases, feature-rich, type-safe
- **Idiomatic**: **Faster in practice** (1.2-32x), native Go, zero-cost FP
The idiomatic packages now represent the **best of both worlds**: full functional programming capabilities with Go's native performance and idioms. Unless you specifically need Either[E, A]'s custom error types or advanced FP features, **idiomatic is the recommended choice** for production Go services.
Both maintain the core benefits of functional programming—choose based on whether you prioritize performance & Go integration (idiomatic) or type flexibility & FP features (either).


@@ -0,0 +1,174 @@
# Idiomatic ReaderIOResult Functions - Implementation Plan
## Overview
This document outlines the idiomatic functions that should be added to the `readerioresult` package to support Go's native `(value, error)` pattern, similar to what was implemented for `readerresult`.
## Key Concepts
The idiomatic package `github.com/IBM/fp-go/v2/idiomatic/readerioresult` defines:
- `ReaderIOResult[R, A]` as `func(R) func() (A, error)` (idiomatic style)
- This contrasts with `readerioresult.ReaderIOResult[R, A]` which is `Reader[R, IOResult[A]]` (functional style); both shapes are sketched below
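For reference, the two shapes can be written out with Go 1.24 generic type aliases; the alias names below are purely illustrative:
```go
package sketch

import "github.com/IBM/fp-go/v2/result"

// Idiomatic style: the reader yields a thunk returning Go's (value, error) pair.
type IdiomaticReaderIOResult[R, A any] = func(R) func() (A, error)

// Functional style: the reader yields an IOResult, i.e. a thunk returning Result[A].
type FunctionalReaderIOResult[R, A any] = func(R) func() result.Result[A]
```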
## Functions to Add
### In `readerioresult/reader.go`
Add helper functions at the top:
```go
func fromReaderIOResultKleisliI[R, A, B any](f RIORI.Kleisli[R, A, B]) Kleisli[R, A, B] {
return function.Flow2(f, FromReaderIOResultI[R, B])
}
func fromIOResultKleisliI[A, B any](f IORI.Kleisli[A, B]) ioresult.Kleisli[A, B] {
return ioresult.Eitherize1(f)
}
```
### Core Conversion Functions
1. **FromResultI** - Lift `(value, error)` to ReaderIOResult
```go
func FromResultI[R, A any](a A, err error) ReaderIOResult[R, A]
```
2. **FromIOResultI** - Lift idiomatic IOResult to functional
```go
func FromIOResultI[R, A any](ioe func() (A, error)) ReaderIOResult[R, A]
```
3. **FromReaderIOResultI** - Convert idiomatic ReaderIOResult to functional
```go
func FromReaderIOResultI[R, A any](rr RIORI.ReaderIOResult[R, A]) ReaderIOResult[R, A]
```
### Chain Functions
4. **MonadChainI** / **ChainI** - Chain with idiomatic Kleisli
```go
func MonadChainI[R, A, B any](ma ReaderIOResult[R, A], f RIORI.Kleisli[R, A, B]) ReaderIOResult[R, B]
func ChainI[R, A, B any](f RIORI.Kleisli[R, A, B]) Operator[R, A, B]
```
5. **MonadChainEitherIK** / **ChainEitherIK** - Chain with idiomatic Result functions
```go
func MonadChainEitherIK[R, A, B any](ma ReaderIOResult[R, A], f func(A) (B, error)) ReaderIOResult[R, B]
func ChainEitherIK[R, A, B any](f func(A) (B, error)) Operator[R, A, B]
```
6. **MonadChainIOResultIK** / **ChainIOResultIK** - Chain with idiomatic IOResult
```go
func MonadChainIOResultIK[R, A, B any](ma ReaderIOResult[R, A], f func(A) func() (B, error)) ReaderIOResult[R, B]
func ChainIOResultIK[R, A, B any](f func(A) func() (B, error)) Operator[R, A, B]
```
### Applicative Functions
7. **MonadApI** / **ApI** - Apply with idiomatic value
```go
func MonadApI[B, R, A any](fab ReaderIOResult[R, func(A) B], fa RIORI.ReaderIOResult[R, A]) ReaderIOResult[R, B]
func ApI[B, R, A any](fa RIORI.ReaderIOResult[R, A]) Operator[R, func(A) B, B]
```
### Error Handling Functions
8. **OrElseI** - Fallback with idiomatic computation
```go
func OrElseI[R, A any](onLeft RIORI.Kleisli[R, error, A]) Operator[R, A, A]
```
9. **MonadAltI** / **AltI** - Alternative with idiomatic computation
```go
func MonadAltI[R, A any](first ReaderIOResult[R, A], second Lazy[RIORI.ReaderIOResult[R, A]]) ReaderIOResult[R, A]
func AltI[R, A any](second Lazy[RIORI.ReaderIOResult[R, A]]) Operator[R, A, A]
```
### Flatten Functions
10. **FlattenI** - Flatten nested idiomatic ReaderIOResult
```go
func FlattenI[R, A any](mma ReaderIOResult[R, RIORI.ReaderIOResult[R, A]]) ReaderIOResult[R, A]
```
### In `readerioresult/bind.go`
11. **BindI** - Bind with idiomatic Kleisli
```go
func BindI[R, S1, S2, T any](setter func(T) func(S1) S2, f RIORI.Kleisli[R, S1, T]) Operator[R, S1, S2]
```
12. **ApIS** - Apply idiomatic value to state
```go
func ApIS[R, S1, S2, T any](setter func(T) func(S1) S2, fa RIORI.ReaderIOResult[R, T]) Operator[R, S1, S2]
```
13. **ApISL** - Apply idiomatic value using lens
```go
func ApISL[R, S, T any](lens L.Lens[S, T], fa RIORI.ReaderIOResult[R, T]) Operator[R, S, S]
```
14. **BindIL** - Bind idiomatic with lens
```go
func BindIL[R, S, T any](lens L.Lens[S, T], f RIORI.Kleisli[R, T, T]) Operator[R, S, S]
```
15. **BindEitherIK** / **BindResultIK** - Bind idiomatic Result
```go
func BindEitherIK[R, S1, S2, T any](setter func(T) func(S1) S2, f func(S1) (T, error)) Operator[R, S1, S2]
func BindResultIK[R, S1, S2, T any](setter func(T) func(S1) S2, f func(S1) (T, error)) Operator[R, S1, S2]
```
16. **BindIOResultIK** - Bind idiomatic IOResult
```go
func BindIOResultIK[R, S1, S2, T any](setter func(T) func(S1) S2, f func(S1) func() (T, error)) Operator[R, S1, S2]
```
17. **BindToEitherI** / **BindToResultI** - Initialize from idiomatic pair
```go
func BindToEitherI[R, S1, T any](setter func(T) S1) func(T, error) ReaderIOResult[R, S1]
func BindToResultI[R, S1, T any](setter func(T) S1) func(T, error) ReaderIOResult[R, S1]
```
18. **BindToIOResultI** - Initialize from idiomatic IOResult
```go
func BindToIOResultI[R, S1, T any](setter func(T) S1) func(func() (T, error)) ReaderIOResult[R, S1]
```
19. **ApEitherIS** / **ApResultIS** - Apply idiomatic pair to state
```go
func ApEitherIS[R, S1, S2, T any](setter func(T) func(S1) S2) func(T, error) Operator[R, S1, S2]
func ApResultIS[R, S1, S2, T any](setter func(T) func(S1) S2) func(T, error) Operator[R, S1, S2]
```
20. **ApIOResultIS** - Apply idiomatic IOResult to state
```go
func ApIOResultIS[R, S1, S2, T any](setter func(T) func(S1) S2, fa func() (T, error)) Operator[R, S1, S2]
```
## Testing Strategy
Create `readerioresult/idiomatic_test.go` with:
- Tests for each idiomatic function
- Success and error cases
- Integration tests showing real-world usage patterns
- Parallel execution tests where applicable
- Complex scenarios combining multiple idiomatic functions
## Implementation Priority
1. **High Priority** - Core conversion and chain functions (1-6)
2. **Medium Priority** - Bind functions for do-notation (11-16)
3. **Low Priority** - Advanced applicative and error handling (7-10, 17-20)
## Benefits
1. **Seamless Integration** - Mix Go idiomatic code with functional pipelines (see the sketch after this list)
2. **Gradual Adoption** - Convert code incrementally from idiomatic to functional
3. **Interoperability** - Work with existing Go libraries that return `(value, error)`
4. **Consistency** - Mirrors the successful pattern from `readerresult`
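To make the intended benefit concrete, here is a hypothetical usage sketch of two of the proposed functions (`FromResultI`, `ChainEitherIK`); they do not exist yet, so the calls simply follow the signatures listed above:
```go
package sketch

import (
	"strconv"

	RIOR "github.com/IBM/fp-go/v2/readerioresult"
)

type Config struct{ Base int }

// lookup is ordinary idiomatic Go code returning (value, error).
func lookup(key string) (string, error) {
	return "42", nil
}

// pipeline lifts the idiomatic result into the functional ReaderIOResult and
// keeps chaining plain (value, error) functions without manual error checks.
func pipeline(key string) RIOR.ReaderIOResult[Config, int] {
	step := RIOR.FromResultI[Config](lookup(key))
	return RIOR.ChainEitherIK[Config](strconv.Atoi)(step)
}
```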
## References
- See `readerresult` package for similar implementations
- See `idiomatic/readerresult` for the idiomatic types
- See `idiomatic/ioresult` for IO-level idiomatic patterns


@@ -61,6 +61,7 @@ package main
import ( import (
"fmt" "fmt"
"github.com/IBM/fp-go/v2/option" "github.com/IBM/fp-go/v2/option"
N "github.com/IBM/fp-go/v2/number"
) )
func main() { func main() {
@@ -69,7 +70,7 @@ func main() {
none := option.None[int]() none := option.None[int]()
// Map over values // Map over values
doubled := option.Map(func(x int) int { return x * 2 })(some) doubled := option.Map(N.Mul(2))(some)
fmt.Println(option.GetOrElse(0)(doubled)) // Output: 84 fmt.Println(option.GetOrElse(0)(doubled)) // Output: 84
// Chain operations // Chain operations
@@ -145,6 +146,8 @@ func main() {
} }
``` ```
## ⚠️ Breaking Changes
### From V1 to V2 ### From V1 to V2
#### 1. Generic Type Aliases #### 1. Generic Type Aliases
@@ -187,7 +190,7 @@ Monadic operations for `Pair` now operate on the **second argument** to align wi
```go ```go
// Operations on first element // Operations on first element
pair := MakePair(1, "hello") pair := MakePair(1, "hello")
result := Map(func(x int) int { return x * 2 })(pair) // Pair(2, "hello") result := Map(N.Mul(2))(pair) // Pair(2, "hello")
``` ```
**V2:** **V2:**
@@ -204,8 +207,8 @@ The `Compose` function for endomorphisms now follows **mathematical function com
**V1:** **V1:**
```go ```go
// Compose executed left-to-right // Compose executed left-to-right
double := func(x int) int { return x * 2 } double := N.Mul(2)
increment := func(x int) int { return x + 1 } increment := N.Add(1)
composed := Compose(double, increment) composed := Compose(double, increment)
result := composed(5) // (5 * 2) + 1 = 11 result := composed(5) // (5 * 2) + 1 = 11
``` ```
@@ -213,8 +216,8 @@ result := composed(5) // (5 * 2) + 1 = 11
**V2:** **V2:**
```go ```go
// Compose executes RIGHT-TO-LEFT (mathematical composition) // Compose executes RIGHT-TO-LEFT (mathematical composition)
double := func(x int) int { return x * 2 } double := N.Mul(2)
increment := func(x int) int { return x + 1 } increment := N.Add(1)
composed := Compose(double, increment) composed := Compose(double, increment)
result := composed(5) // (5 + 1) * 2 = 12 result := composed(5) // (5 + 1) * 2 = 12
@@ -368,7 +371,7 @@ If you're using `Pair`, update operations to work on the second element:
```go ```go
pair := MakePair(42, "data") pair := MakePair(42, "data")
// Map operates on first element // Map operates on first element
result := Map(func(x int) int { return x * 2 })(pair) result := Map(N.Mul(2))(pair)
``` ```
**After (V2):** **After (V2):**


@@ -17,11 +17,10 @@ package array
import ( import (
G "github.com/IBM/fp-go/v2/array/generic" G "github.com/IBM/fp-go/v2/array/generic"
EM "github.com/IBM/fp-go/v2/endomorphism"
F "github.com/IBM/fp-go/v2/function" F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/array" "github.com/IBM/fp-go/v2/internal/array"
M "github.com/IBM/fp-go/v2/monoid" M "github.com/IBM/fp-go/v2/monoid"
O "github.com/IBM/fp-go/v2/option" "github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/tuple" "github.com/IBM/fp-go/v2/tuple"
) )
@@ -50,16 +49,16 @@ func Replicate[A any](n int, a A) []A {
// This is the monadic version of Map that takes the array as the first parameter. // This is the monadic version of Map that takes the array as the first parameter.
// //
//go:inline //go:inline
func MonadMap[A, B any](as []A, f func(a A) B) []B { func MonadMap[A, B any](as []A, f func(A) B) []B {
return G.MonadMap[[]A, []B](as, f) return G.MonadMap[[]A, []B](as, f)
} }
// MonadMapRef applies a function to a pointer to each element of an array, returning a new array with the results. // MonadMapRef applies a function to a pointer to each element of an array, returning a new array with the results.
// This is useful when you need to access elements by reference without copying. // This is useful when you need to access elements by reference without copying.
func MonadMapRef[A, B any](as []A, f func(a *A) B) []B { func MonadMapRef[A, B any](as []A, f func(*A) B) []B {
count := len(as) count := len(as)
bs := make([]B, count) bs := make([]B, count)
for i := count - 1; i >= 0; i-- { for i := range count {
bs[i] = f(&as[i]) bs[i] = f(&as[i])
} }
return bs return bs
@@ -68,7 +67,7 @@ func MonadMapRef[A, B any](as []A, f func(a *A) B) []B {
// MapWithIndex applies a function to each element and its index in an array, returning a new array with the results. // MapWithIndex applies a function to each element and its index in an array, returning a new array with the results.
// //
//go:inline //go:inline
func MapWithIndex[A, B any](f func(int, A) B) func([]A) []B { func MapWithIndex[A, B any](f func(int, A) B) Operator[A, B] {
return G.MapWithIndex[[]A, []B](f) return G.MapWithIndex[[]A, []B](f)
} }
@@ -77,39 +76,39 @@ func MapWithIndex[A, B any](f func(int, A) B) func([]A) []B {
// //
// Example: // Example:
// //
// double := array.Map(func(x int) int { return x * 2 }) // double := array.Map(N.Mul(2))
// result := double([]int{1, 2, 3}) // [2, 4, 6] // result := double([]int{1, 2, 3}) // [2, 4, 6]
// //
//go:inline //go:inline
func Map[A, B any](f func(a A) B) func([]A) []B { func Map[A, B any](f func(A) B) Operator[A, B] {
return G.Map[[]A, []B](f) return G.Map[[]A, []B](f)
} }
// MapRef applies a function to a pointer to each element of an array, returning a new array with the results. // MapRef applies a function to a pointer to each element of an array, returning a new array with the results.
// This is the curried version that returns a function. // This is the curried version that returns a function.
func MapRef[A, B any](f func(a *A) B) func([]A) []B { func MapRef[A, B any](f func(*A) B) Operator[A, B] {
return F.Bind2nd(MonadMapRef[A, B], f) return F.Bind2nd(MonadMapRef[A, B], f)
} }
func filterRef[A any](fa []A, pred func(a *A) bool) []A { func filterRef[A any](fa []A, pred func(*A) bool) []A {
var result []A
count := len(fa) count := len(fa)
for i := 0; i < count; i++ { var result []A = make([]A, 0, count)
a := fa[i] for i := range count {
if pred(&a) { a := &fa[i]
result = append(result, a) if pred(a) {
result = append(result, *a)
} }
} }
return result return result
} }
func filterMapRef[A, B any](fa []A, pred func(a *A) bool, f func(a *A) B) []B { func filterMapRef[A, B any](fa []A, pred func(*A) bool, f func(*A) B) []B {
var result []B
count := len(fa) count := len(fa)
for i := 0; i < count; i++ { var result []B = make([]B, 0, count)
a := fa[i] for i := range count {
if pred(&a) { a := &fa[i]
result = append(result, f(&a)) if pred(a) {
result = append(result, f(a))
} }
} }
return result return result
@@ -118,19 +117,19 @@ func filterMapRef[A, B any](fa []A, pred func(a *A) bool, f func(a *A) B) []B {
// Filter returns a new array with all elements from the original array that match a predicate // Filter returns a new array with all elements from the original array that match a predicate
// //
//go:inline //go:inline
func Filter[A any](pred func(A) bool) EM.Endomorphism[[]A] { func Filter[A any](pred func(A) bool) Operator[A, A] {
return G.Filter[[]A](pred) return G.Filter[[]A](pred)
} }
// FilterWithIndex returns a new array with all elements from the original array that match a predicate // FilterWithIndex returns a new array with all elements from the original array that match a predicate
// //
//go:inline //go:inline
func FilterWithIndex[A any](pred func(int, A) bool) EM.Endomorphism[[]A] { func FilterWithIndex[A any](pred func(int, A) bool) Operator[A, A] {
return G.FilterWithIndex[[]A](pred) return G.FilterWithIndex[[]A](pred)
} }
// FilterRef returns a new array with all elements from the original array that match a predicate operating on pointers. // FilterRef returns a new array with all elements from the original array that match a predicate operating on pointers.
func FilterRef[A any](pred func(*A) bool) EM.Endomorphism[[]A] { func FilterRef[A any](pred func(*A) bool) Operator[A, A] {
return F.Bind2nd(filterRef[A], pred) return F.Bind2nd(filterRef[A], pred)
} }
@@ -138,7 +137,7 @@ func FilterRef[A any](pred func(*A) bool) EM.Endomorphism[[]A] {
// This is the monadic version that takes the array as the first parameter. // This is the monadic version that takes the array as the first parameter.
// //
//go:inline //go:inline
func MonadFilterMap[A, B any](fa []A, f func(A) O.Option[B]) []B { func MonadFilterMap[A, B any](fa []A, f option.Kleisli[A, B]) []B {
return G.MonadFilterMap[[]A, []B](fa, f) return G.MonadFilterMap[[]A, []B](fa, f)
} }
@@ -146,33 +145,33 @@ func MonadFilterMap[A, B any](fa []A, f func(A) O.Option[B]) []B {
// keeping only the Some values. This is the monadic version that takes the array as the first parameter. // keeping only the Some values. This is the monadic version that takes the array as the first parameter.
// //
//go:inline //go:inline
func MonadFilterMapWithIndex[A, B any](fa []A, f func(int, A) O.Option[B]) []B { func MonadFilterMapWithIndex[A, B any](fa []A, f func(int, A) Option[B]) []B {
return G.MonadFilterMapWithIndex[[]A, []B](fa, f) return G.MonadFilterMapWithIndex[[]A, []B](fa, f)
} }
// FilterMap maps an array with an iterating function that returns an [O.Option] and it keeps only the Some values discarding the Nones. // FilterMap maps an array with an iterating function that returns an [Option] and it keeps only the Some values discarding the Nones.
// //
//go:inline //go:inline
func FilterMap[A, B any](f func(A) O.Option[B]) func([]A) []B { func FilterMap[A, B any](f option.Kleisli[A, B]) Operator[A, B] {
return G.FilterMap[[]A, []B](f) return G.FilterMap[[]A, []B](f)
} }
// FilterMapWithIndex maps an array with an iterating function that returns an [O.Option] and it keeps only the Some values discarding the Nones. // FilterMapWithIndex maps an array with an iterating function that returns an [Option] and it keeps only the Some values discarding the Nones.
// //
//go:inline //go:inline
func FilterMapWithIndex[A, B any](f func(int, A) O.Option[B]) func([]A) []B { func FilterMapWithIndex[A, B any](f func(int, A) Option[B]) Operator[A, B] {
return G.FilterMapWithIndex[[]A, []B](f) return G.FilterMapWithIndex[[]A, []B](f)
} }
// FilterChain maps an array with an iterating function that returns an [O.Option] of an array. It keeps only the Some values discarding the Nones and then flattens the result. // FilterChain maps an array with an iterating function that returns an [Option] of an array. It keeps only the Some values discarding the Nones and then flattens the result.
// //
//go:inline //go:inline
func FilterChain[A, B any](f func(A) O.Option[[]B]) func([]A) []B { func FilterChain[A, B any](f option.Kleisli[A, []B]) Operator[A, B] {
return G.FilterChain[[]A](f) return G.FilterChain[[]A](f)
} }
// FilterMapRef filters an array using a predicate on pointers and maps the matching elements using a function on pointers. // FilterMapRef filters an array using a predicate on pointers and maps the matching elements using a function on pointers.
func FilterMapRef[A, B any](pred func(a *A) bool, f func(a *A) B) func([]A) []B { func FilterMapRef[A, B any](pred func(a *A) bool, f func(*A) B) Operator[A, B] {
return func(fa []A) []B { return func(fa []A) []B {
return filterMapRef(fa, pred, f) return filterMapRef(fa, pred, f)
} }
@@ -180,8 +179,7 @@ func FilterMapRef[A, B any](pred func(a *A) bool, f func(a *A) B) func([]A) []B
func reduceRef[A, B any](fa []A, f func(B, *A) B, initial B) B { func reduceRef[A, B any](fa []A, f func(B, *A) B, initial B) B {
current := initial current := initial
count := len(fa) for i := range len(fa) {
for i := 0; i < count; i++ {
current = f(current, &fa[i]) current = f(current, &fa[i])
} }
return current return current
@@ -262,6 +260,8 @@ func Empty[A any]() []A {
} }
// Zero returns an empty array of type A (alias for Empty). // Zero returns an empty array of type A (alias for Empty).
//
//go:inline
func Zero[A any]() []A { func Zero[A any]() []A {
return Empty[A]() return Empty[A]()
} }
@@ -277,7 +277,7 @@ func Of[A any](a A) []A {
// This is the monadic version that takes the array as the first parameter (also known as FlatMap). // This is the monadic version that takes the array as the first parameter (also known as FlatMap).
// //
//go:inline //go:inline
func MonadChain[A, B any](fa []A, f func(a A) []B) []B { func MonadChain[A, B any](fa []A, f Kleisli[A, B]) []B {
return G.MonadChain(fa, f) return G.MonadChain(fa, f)
} }
@@ -290,7 +290,7 @@ func MonadChain[A, B any](fa []A, f func(a A) []B) []B {
// result := duplicate([]int{1, 2, 3}) // [1, 1, 2, 2, 3, 3] // result := duplicate([]int{1, 2, 3}) // [1, 1, 2, 2, 3, 3]
// //
//go:inline //go:inline
func Chain[A, B any](f func(A) []B) func([]A) []B { func Chain[A, B any](f Kleisli[A, B]) Operator[A, B] {
return G.Chain[[]A](f) return G.Chain[[]A](f)
} }
@@ -306,7 +306,7 @@ func MonadAp[B, A any](fab []func(A) B, fa []A) []B {
// This is the curried version. // This is the curried version.
// //
//go:inline //go:inline
func Ap[B, A any](fa []A) func([]func(A) B) []B { func Ap[B, A any](fa []A) Operator[func(A) B, B] {
return G.Ap[[]B, []func(A) B](fa) return G.Ap[[]B, []func(A) B](fa)
} }
@@ -328,7 +328,7 @@ func MatchLeft[A, B any](onEmpty func() B, onNonEmpty func(A, []A) B) func([]A)
// Returns None if the array is empty. // Returns None if the array is empty.
// //
//go:inline //go:inline
func Tail[A any](as []A) O.Option[[]A] { func Tail[A any](as []A) Option[[]A] {
return G.Tail(as) return G.Tail(as)
} }
@@ -336,7 +336,7 @@ func Tail[A any](as []A) O.Option[[]A] {
// Returns None if the array is empty. // Returns None if the array is empty.
// //
//go:inline //go:inline
func Head[A any](as []A) O.Option[A] { func Head[A any](as []A) Option[A] {
return G.Head(as) return G.Head(as)
} }
@@ -344,7 +344,7 @@ func Head[A any](as []A) O.Option[A] {
// Returns None if the array is empty. // Returns None if the array is empty.
// //
//go:inline //go:inline
func First[A any](as []A) O.Option[A] { func First[A any](as []A) Option[A] {
return G.First(as) return G.First(as)
} }
@@ -352,12 +352,12 @@ func First[A any](as []A) O.Option[A] {
// Returns None if the array is empty. // Returns None if the array is empty.
// //
//go:inline //go:inline
func Last[A any](as []A) O.Option[A] { func Last[A any](as []A) Option[A] {
return G.Last(as) return G.Last(as)
} }
// PrependAll inserts a separator before each element of an array. // PrependAll inserts a separator before each element of an array.
func PrependAll[A any](middle A) EM.Endomorphism[[]A] { func PrependAll[A any](middle A) Operator[A, A] {
return func(as []A) []A { return func(as []A) []A {
count := len(as) count := len(as)
dst := count * 2 dst := count * 2
@@ -377,7 +377,7 @@ func PrependAll[A any](middle A) EM.Endomorphism[[]A] {
// Example: // Example:
// //
// result := array.Intersperse(0)([]int{1, 2, 3}) // [1, 0, 2, 0, 3] // result := array.Intersperse(0)([]int{1, 2, 3}) // [1, 0, 2, 0, 3]
func Intersperse[A any](middle A) EM.Endomorphism[[]A] { func Intersperse[A any](middle A) Operator[A, A] {
prepend := PrependAll(middle) prepend := PrependAll(middle)
return func(as []A) []A { return func(as []A) []A {
if IsEmpty(as) { if IsEmpty(as) {
@@ -406,7 +406,7 @@ func Flatten[A any](mma [][]A) []A {
} }
// Slice extracts a subarray from index low (inclusive) to high (exclusive). // Slice extracts a subarray from index low (inclusive) to high (exclusive).
func Slice[A any](low, high int) func(as []A) []A { func Slice[A any](low, high int) Operator[A, A] {
return array.Slice[[]A](low, high) return array.Slice[[]A](low, high)
} }
@@ -414,7 +414,7 @@ func Slice[A any](low, high int) func(as []A) []A {
// Returns None if the index is out of bounds. // Returns None if the index is out of bounds.
// //
//go:inline //go:inline
func Lookup[A any](idx int) func([]A) O.Option[A] { func Lookup[A any](idx int) func([]A) Option[A] {
return G.Lookup[[]A](idx) return G.Lookup[[]A](idx)
} }
@@ -422,7 +422,7 @@ func Lookup[A any](idx int) func([]A) O.Option[A] {
// If the index is out of bounds, the element is appended. // If the index is out of bounds, the element is appended.
// //
//go:inline //go:inline
func UpsertAt[A any](a A) EM.Endomorphism[[]A] { func UpsertAt[A any](a A) Operator[A, A] {
return G.UpsertAt[[]A](a) return G.UpsertAt[[]A](a)
} }
@@ -468,7 +468,7 @@ func ConstNil[A any]() []A {
// SliceRight extracts a subarray from the specified start index to the end. // SliceRight extracts a subarray from the specified start index to the end.
// //
//go:inline //go:inline
func SliceRight[A any](start int) EM.Endomorphism[[]A] { func SliceRight[A any](start int) Operator[A, A] {
return G.SliceRight[[]A](start) return G.SliceRight[[]A](start)
} }
@@ -482,7 +482,7 @@ func Copy[A any](b []A) []A {
// Clone creates a deep copy of the array using the provided endomorphism to clone the values // Clone creates a deep copy of the array using the provided endomorphism to clone the values
// //
//go:inline //go:inline
func Clone[A any](f func(A) A) func(as []A) []A { func Clone[A any](f func(A) A) Operator[A, A] {
return G.Clone[[]A](f) return G.Clone[[]A](f)
} }
@@ -510,8 +510,8 @@ func Fold[A any](m M.Monoid[A]) func([]A) A {
// Push adds an element to the end of an array (alias for Append). // Push adds an element to the end of an array (alias for Append).
// //
//go:inline //go:inline
func Push[A any](a A) EM.Endomorphism[[]A] { func Push[A any](a A) Operator[A, A] {
return G.Push[EM.Endomorphism[[]A]](a) return G.Push[Operator[A, A]](a)
} }
// MonadFlap applies a value to an array of functions, producing an array of results. // MonadFlap applies a value to an array of functions, producing an array of results.
@@ -526,13 +526,99 @@ func MonadFlap[B, A any](fab []func(A) B, a A) []B {
// This is the curried version. // This is the curried version.
// //
//go:inline //go:inline
func Flap[B, A any](a A) func([]func(A) B) []B { func Flap[B, A any](a A) Operator[func(A) B, B] {
return G.Flap[func(A) B, []func(A) B, []B](a) return G.Flap[func(A) B, []func(A) B, []B](a)
} }
// Prepend adds an element to the beginning of an array, returning a new array. // Prepend adds an element to the beginning of an array, returning a new array.
// //
//go:inline //go:inline
func Prepend[A any](head A) EM.Endomorphism[[]A] { func Prepend[A any](head A) Operator[A, A] {
return G.Prepend[EM.Endomorphism[[]A]](head) return G.Prepend[Operator[A, A]](head)
}
// Reverse returns a new slice with elements in reverse order.
// This function creates a new slice containing all elements from the input slice
// in reverse order, without modifying the original slice.
//
// Type Parameters:
// - A: The type of elements in the slice
//
// Parameters:
// - as: The input slice to reverse
//
// Returns:
// - A new slice with elements in reverse order
//
// Behavior:
// - Creates a new slice with the same length as the input
// - Copies elements from the input slice in reverse order
// - Does not modify the original slice
// - Returns an empty slice if the input is empty
// - Returns a single-element slice unchanged if input has one element
//
// Example:
//
// numbers := []int{1, 2, 3, 4, 5}
// reversed := array.Reverse(numbers)
// // reversed: []int{5, 4, 3, 2, 1}
// // numbers: []int{1, 2, 3, 4, 5} (unchanged)
//
// Example with strings:
//
// words := []string{"hello", "world", "foo", "bar"}
// reversed := array.Reverse(words)
// // reversed: []string{"bar", "foo", "world", "hello"}
//
// Example with empty slice:
//
// empty := []int{}
// reversed := array.Reverse(empty)
// // reversed: []int{} (empty slice)
//
// Example with single element:
//
// single := []string{"only"}
// reversed := array.Reverse(single)
// // reversed: []string{"only"}
//
// Use cases:
// - Reversing the order of elements for display or processing
// - Implementing stack-like behavior (LIFO)
// - Processing data in reverse chronological order
// - Reversing transformation pipelines
// - Creating palindrome checks
// - Implementing undo/redo functionality
//
// Example with processing in reverse:
//
// events := []string{"start", "middle", "end"}
// reversed := array.Reverse(events)
// // Process events in reverse order
// for _, event := range reversed {
// fmt.Println(event) // Prints: "end", "middle", "start"
// }
//
// Example with functional composition:
//
// numbers := []int{1, 2, 3, 4, 5}
// result := F.Pipe2(
// numbers,
// array.Map(N.Mul(2)),
// array.Reverse,
// )
// // result: []int{10, 8, 6, 4, 2}
//
// Performance:
// - Time complexity: O(n) where n is the length of the slice
// - Space complexity: O(n) for the new slice
// - Does not allocate if the input slice is empty
//
// Note: This function is immutable - it does not modify the original slice.
// If you need to reverse a slice in-place, consider using a different approach
// or modifying the slice directly.
//
//go:inline
func Reverse[A any](as []A) []A {
return G.Reverse(as)
} }


@@ -35,7 +35,7 @@ func TestReplicate(t *testing.T) {
func TestMonadMap(t *testing.T) { func TestMonadMap(t *testing.T) {
src := []int{1, 2, 3} src := []int{1, 2, 3}
result := MonadMap(src, func(x int) int { return x * 2 }) result := MonadMap(src, N.Mul(2))
assert.Equal(t, []int{2, 4, 6}, result) assert.Equal(t, []int{2, 4, 6}, result)
} }
@@ -173,8 +173,8 @@ func TestChain(t *testing.T) {
func TestMonadAp(t *testing.T) { func TestMonadAp(t *testing.T) {
fns := []func(int) int{ fns := []func(int) int{
func(x int) int { return x * 2 }, N.Mul(2),
func(x int) int { return x + 10 }, N.Add(10),
} }
values := []int{1, 2} values := []int{1, 2}
result := MonadAp(fns, values) result := MonadAp(fns, values)
@@ -268,7 +268,7 @@ func TestCopy(t *testing.T) {
func TestClone(t *testing.T) { func TestClone(t *testing.T) {
src := []int{1, 2, 3} src := []int{1, 2, 3}
cloner := Clone(func(x int) int { return x * 2 }) cloner := Clone(N.Mul(2))
result := cloner(src) result := cloner(src)
assert.Equal(t, []int{2, 4, 6}, result) assert.Equal(t, []int{2, 4, 6}, result)
} }
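The anonymous doubling and adding lambdas in these tests are replaced by the curried helpers from the number package. A minimal sketch of what those helpers do, assuming N.Mul and N.Add are curried binary operators as their use throughout this compare suggests:

	package main

	import (
		"fmt"

		N "github.com/IBM/fp-go/v2/number"
	)

	func main() {
		double := N.Mul(2) // func(int) int
		addTen := N.Add(10)
		fmt.Println(double(3), addTen(3)) // 6 13
	}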

View File

@@ -22,6 +22,7 @@ import (
F "github.com/IBM/fp-go/v2/function" F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/utils" "github.com/IBM/fp-go/v2/internal/utils"
N "github.com/IBM/fp-go/v2/number"
O "github.com/IBM/fp-go/v2/option" O "github.com/IBM/fp-go/v2/option"
S "github.com/IBM/fp-go/v2/string" S "github.com/IBM/fp-go/v2/string"
T "github.com/IBM/fp-go/v2/tuple" T "github.com/IBM/fp-go/v2/tuple"
@@ -214,3 +215,262 @@ func ExampleFoldMap() {
// Output: ABC // Output: ABC
} }
// TestReverse tests the Reverse function
func TestReverse(t *testing.T) {
t.Run("Reverse integers", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5}
result := Reverse(input)
expected := []int{5, 4, 3, 2, 1}
assert.Equal(t, expected, result)
})
t.Run("Reverse strings", func(t *testing.T) {
input := []string{"hello", "world", "foo", "bar"}
result := Reverse(input)
expected := []string{"bar", "foo", "world", "hello"}
assert.Equal(t, expected, result)
})
t.Run("Reverse empty slice", func(t *testing.T) {
input := []int{}
result := Reverse(input)
assert.Equal(t, []int{}, result)
})
t.Run("Reverse single element", func(t *testing.T) {
input := []string{"only"}
result := Reverse(input)
assert.Equal(t, []string{"only"}, result)
})
t.Run("Reverse two elements", func(t *testing.T) {
input := []int{1, 2}
result := Reverse(input)
assert.Equal(t, []int{2, 1}, result)
})
t.Run("Does not modify original slice", func(t *testing.T) {
original := []int{1, 2, 3, 4, 5}
originalCopy := []int{1, 2, 3, 4, 5}
_ = Reverse(original)
assert.Equal(t, originalCopy, original)
})
t.Run("Reverse with floats", func(t *testing.T) {
input := []float64{1.1, 2.2, 3.3}
result := Reverse(input)
expected := []float64{3.3, 2.2, 1.1}
assert.Equal(t, expected, result)
})
t.Run("Reverse with structs", func(t *testing.T) {
type Person struct {
Name string
Age int
}
input := []Person{
{"Alice", 30},
{"Bob", 25},
{"Charlie", 35},
}
result := Reverse(input)
expected := []Person{
{"Charlie", 35},
{"Bob", 25},
{"Alice", 30},
}
assert.Equal(t, expected, result)
})
t.Run("Reverse with pointers", func(t *testing.T) {
a, b, c := 1, 2, 3
input := []*int{&a, &b, &c}
result := Reverse(input)
assert.Equal(t, []*int{&c, &b, &a}, result)
})
t.Run("Double reverse returns original order", func(t *testing.T) {
original := []int{1, 2, 3, 4, 5}
reversed := Reverse(original)
doubleReversed := Reverse(reversed)
assert.Equal(t, original, doubleReversed)
})
t.Run("Reverse with large slice", func(t *testing.T) {
input := MakeBy(1000, F.Identity[int])
result := Reverse(input)
// Check first and last elements
assert.Equal(t, 999, result[0])
assert.Equal(t, 0, result[999])
// Check length
assert.Equal(t, 1000, len(result))
})
t.Run("Reverse palindrome", func(t *testing.T) {
input := []int{1, 2, 3, 2, 1}
result := Reverse(input)
assert.Equal(t, input, result)
})
}
// TestReverseComposition tests Reverse with other array operations
func TestReverseComposition(t *testing.T) {
t.Run("Reverse after Map", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5}
result := F.Pipe2(
input,
Map(N.Mul(2)),
Reverse[int],
)
expected := []int{10, 8, 6, 4, 2}
assert.Equal(t, expected, result)
})
t.Run("Map after Reverse", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5}
result := F.Pipe2(
input,
Reverse[int],
Map(N.Mul(2)),
)
expected := []int{10, 8, 6, 4, 2}
assert.Equal(t, expected, result)
})
t.Run("Reverse with Filter", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5, 6}
result := F.Pipe2(
input,
Filter(func(n int) bool { return n%2 == 0 }),
Reverse[int],
)
expected := []int{6, 4, 2}
assert.Equal(t, expected, result)
})
t.Run("Reverse with Reduce", func(t *testing.T) {
input := []string{"a", "b", "c"}
reversed := Reverse(input)
result := Reduce(func(acc, val string) string {
return acc + val
}, "")(reversed)
assert.Equal(t, "cba", result)
})
t.Run("Reverse with Flatten", func(t *testing.T) {
input := [][]int{{1, 2}, {3, 4}, {5, 6}}
result := F.Pipe2(
input,
Reverse[[]int],
Flatten[int],
)
expected := []int{5, 6, 3, 4, 1, 2}
assert.Equal(t, expected, result)
})
}
// TestReverseUseCases demonstrates practical use cases for Reverse
func TestReverseUseCases(t *testing.T) {
t.Run("Process events in reverse chronological order", func(t *testing.T) {
events := []string{"2024-01-01", "2024-01-02", "2024-01-03"}
reversed := Reverse(events)
// Most recent first
assert.Equal(t, "2024-01-03", reversed[0])
assert.Equal(t, "2024-01-01", reversed[2])
})
t.Run("Implement stack behavior (LIFO)", func(t *testing.T) {
stack := []int{1, 2, 3, 4, 5}
reversed := Reverse(stack)
// Pop from reversed (LIFO)
assert.Equal(t, 5, reversed[0])
assert.Equal(t, 4, reversed[1])
})
t.Run("Reverse string characters", func(t *testing.T) {
chars := []rune("hello")
reversed := Reverse(chars)
result := string(reversed)
assert.Equal(t, "olleh", result)
})
t.Run("Check palindrome", func(t *testing.T) {
word := []rune("racecar")
reversed := Reverse(word)
assert.Equal(t, word, reversed)
notPalindrome := []rune("hello")
reversedNot := Reverse(notPalindrome)
assert.NotEqual(t, notPalindrome, reversedNot)
})
t.Run("Reverse transformation pipeline", func(t *testing.T) {
// Apply transformations in reverse order
numbers := []int{1, 2, 3}
// Normal: add 10, then multiply by 2
normal := F.Pipe2(
numbers,
Map(N.Add(10)),
Map(N.Mul(2)),
)
// Reversed order of operations
reversed := F.Pipe2(
numbers,
Map(N.Mul(2)),
Map(N.Add(10)),
)
assert.NotEqual(t, normal, reversed)
assert.Equal(t, []int{22, 24, 26}, normal)
assert.Equal(t, []int{12, 14, 16}, reversed)
})
}
// TestReverseProperties tests mathematical properties of Reverse
func TestReverseProperties(t *testing.T) {
t.Run("Involution property: Reverse(Reverse(x)) == x", func(t *testing.T) {
testCases := [][]int{
{1, 2, 3, 4, 5},
{1},
{},
{1, 2},
{5, 4, 3, 2, 1},
}
for _, original := range testCases {
result := Reverse(Reverse(original))
assert.Equal(t, original, result)
}
})
t.Run("Length preservation: len(Reverse(x)) == len(x)", func(t *testing.T) {
testCases := [][]int{
{1, 2, 3, 4, 5},
{1},
{},
MakeBy(100, F.Identity[int]),
}
for _, input := range testCases {
result := Reverse(input)
assert.Equal(t, len(input), len(result))
}
})
t.Run("First element becomes last", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5}
result := Reverse(input)
if len(input) > 0 {
assert.Equal(t, input[0], result[len(result)-1])
assert.Equal(t, input[len(input)-1], result[0])
}
})
}

View File

@@ -56,8 +56,8 @@ func Do[S any](
//go:inline //go:inline
func Bind[S1, S2, T any]( func Bind[S1, S2, T any](
setter func(T) func(S1) S2, setter func(T) func(S1) S2,
f func(S1) []T, f Kleisli[S1, T],
) func([]S1) []S2 { ) Operator[S1, S2] {
return G.Bind[[]S1, []S2](setter, f) return G.Bind[[]S1, []S2](setter, f)
} }
@@ -79,7 +79,7 @@ func Bind[S1, S2, T any](
func Let[S1, S2, T any]( func Let[S1, S2, T any](
setter func(T) func(S1) S2, setter func(T) func(S1) S2,
f func(S1) T, f func(S1) T,
) func([]S1) []S2 { ) Operator[S1, S2] {
return G.Let[[]S1, []S2](setter, f) return G.Let[[]S1, []S2](setter, f)
} }
@@ -101,7 +101,7 @@ func Let[S1, S2, T any](
func LetTo[S1, S2, T any]( func LetTo[S1, S2, T any](
setter func(T) func(S1) S2, setter func(T) func(S1) S2,
b T, b T,
) func([]S1) []S2 { ) Operator[S1, S2] {
return G.LetTo[[]S1, []S2](setter, b) return G.LetTo[[]S1, []S2](setter, b)
} }
@@ -120,7 +120,7 @@ func LetTo[S1, S2, T any](
//go:inline //go:inline
func BindTo[S1, T any]( func BindTo[S1, T any](
setter func(T) S1, setter func(T) S1,
) func([]T) []S1 { ) Operator[T, S1] {
return G.BindTo[[]S1, []T](setter) return G.BindTo[[]S1, []T](setter)
} }
@@ -143,6 +143,6 @@ func BindTo[S1, T any](
func ApS[S1, S2, T any]( func ApS[S1, S2, T any](
setter func(T) func(S1) S2, setter func(T) func(S1) S2,
fa []T, fa []T,
) func([]S1) []S2 { ) Operator[S1, S2] {
return G.ApS[[]S1, []S2](setter, fa) return G.ApS[[]S1, []S2](setter, fa)
} }
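With the Kleisli and Operator aliases, the do-notation helpers read as ordinary slice functions. A minimal sketch of Bind in a pipeline; the state struct and its setters are hypothetical, and array.Of is assumed to build a one-element slice as it does elsewhere in this compare:

	package main

	import (
		"fmt"

		A "github.com/IBM/fp-go/v2/array"
		F "github.com/IBM/fp-go/v2/function"
	)

	// hypothetical accumulator for the do-notation pipeline
	type state struct{ x, y int }

	func main() {
		res := F.Pipe2(
			A.Of(state{}),
			// bind x to each value of []int{1, 2}
			A.Bind(
				func(x int) func(state) state {
					return func(s state) state { s.x = x; return s }
				},
				func(state) []int { return []int{1, 2} },
			),
			// bind y to each value of []int{10, 20}
			A.Bind(
				func(y int) func(state) state {
					return func(s state) state { s.y = y; return s }
				},
				func(state) []int { return []int{10, 20} },
			),
		)
		fmt.Println(res) // [{1 10} {1 20} {2 10} {2 20}]
	}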

View File

@@ -36,7 +36,7 @@
// generated := array.MakeBy(5, func(i int) int { return i * 2 }) // generated := array.MakeBy(5, func(i int) int { return i * 2 })
// //
// // Transforming arrays // // Transforming arrays
// doubled := array.Map(func(x int) int { return x * 2 })(arr) // doubled := array.Map(N.Mul(2))(arr)
// filtered := array.Filter(func(x int) bool { return x > 2 })(arr) // filtered := array.Filter(func(x int) bool { return x > 2 })(arr)
// //
// // Combining arrays // // Combining arrays
@@ -50,7 +50,7 @@
// numbers := []int{1, 2, 3, 4, 5} // numbers := []int{1, 2, 3, 4, 5}
// //
// // Map transforms each element // // Map transforms each element
// doubled := array.Map(func(x int) int { return x * 2 })(numbers) // doubled := array.Map(N.Mul(2))(numbers)
// // Result: [2, 4, 6, 8, 10] // // Result: [2, 4, 6, 8, 10]
// //
// // Filter keeps elements matching a predicate // // Filter keeps elements matching a predicate

View File

@@ -19,7 +19,7 @@ import (
E "github.com/IBM/fp-go/v2/eq" E "github.com/IBM/fp-go/v2/eq"
) )
func equals[T any](left []T, right []T, eq func(T, T) bool) bool { func equals[T any](left, right []T, eq func(T, T) bool) bool {
if len(left) != len(right) { if len(left) != len(right) {
return false return false
} }

View File

@@ -17,7 +17,7 @@ package array
import ( import (
G "github.com/IBM/fp-go/v2/array/generic" G "github.com/IBM/fp-go/v2/array/generic"
O "github.com/IBM/fp-go/v2/option" "github.com/IBM/fp-go/v2/option"
) )
// FindFirst finds the first element which satisfies a predicate function. // FindFirst finds the first element which satisfies a predicate function.
@@ -30,7 +30,7 @@ import (
// result2 := findGreaterThan3([]int{1, 2, 3}) // None // result2 := findGreaterThan3([]int{1, 2, 3}) // None
// //
//go:inline //go:inline
func FindFirst[A any](pred func(A) bool) func([]A) O.Option[A] { func FindFirst[A any](pred func(A) bool) option.Kleisli[[]A, A] {
return G.FindFirst[[]A](pred) return G.FindFirst[[]A](pred)
} }
@@ -45,7 +45,7 @@ func FindFirst[A any](pred func(A) bool) func([]A) O.Option[A] {
// result := findEvenAtEvenIndex([]int{1, 3, 4, 5}) // Some(4) // result := findEvenAtEvenIndex([]int{1, 3, 4, 5}) // Some(4)
// //
//go:inline //go:inline
func FindFirstWithIndex[A any](pred func(int, A) bool) func([]A) O.Option[A] { func FindFirstWithIndex[A any](pred func(int, A) bool) option.Kleisli[[]A, A] {
return G.FindFirstWithIndex[[]A](pred) return G.FindFirstWithIndex[[]A](pred)
} }
@@ -65,7 +65,7 @@ func FindFirstWithIndex[A any](pred func(int, A) bool) func([]A) O.Option[A] {
// result := parseFirst([]string{"a", "42", "b"}) // Some(42) // result := parseFirst([]string{"a", "42", "b"}) // Some(42)
// //
//go:inline //go:inline
func FindFirstMap[A, B any](sel func(A) O.Option[B]) func([]A) O.Option[B] { func FindFirstMap[A, B any](sel option.Kleisli[A, B]) option.Kleisli[[]A, B] {
return G.FindFirstMap[[]A](sel) return G.FindFirstMap[[]A](sel)
} }
@@ -73,7 +73,7 @@ func FindFirstMap[A, B any](sel func(A) O.Option[B]) func([]A) O.Option[B] {
// The selector receives both the index and the element. // The selector receives both the index and the element.
// //
//go:inline //go:inline
func FindFirstMapWithIndex[A, B any](sel func(int, A) O.Option[B]) func([]A) O.Option[B] { func FindFirstMapWithIndex[A, B any](sel func(int, A) Option[B]) option.Kleisli[[]A, B] {
return G.FindFirstMapWithIndex[[]A](sel) return G.FindFirstMapWithIndex[[]A](sel)
} }
@@ -86,7 +86,7 @@ func FindFirstMapWithIndex[A, B any](sel func(int, A) O.Option[B]) func([]A) O.O
// result := findGreaterThan3([]int{1, 4, 2, 5}) // Some(5) // result := findGreaterThan3([]int{1, 4, 2, 5}) // Some(5)
// //
//go:inline //go:inline
func FindLast[A any](pred func(A) bool) func([]A) O.Option[A] { func FindLast[A any](pred func(A) bool) option.Kleisli[[]A, A] {
return G.FindLast[[]A](pred) return G.FindLast[[]A](pred)
} }
@@ -94,7 +94,7 @@ func FindLast[A any](pred func(A) bool) func([]A) O.Option[A] {
// Returns Some(element) if found, None if no element matches. // Returns Some(element) if found, None if no element matches.
// //
//go:inline //go:inline
func FindLastWithIndex[A any](pred func(int, A) bool) func([]A) O.Option[A] { func FindLastWithIndex[A any](pred func(int, A) bool) option.Kleisli[[]A, A] {
return G.FindLastWithIndex[[]A](pred) return G.FindLastWithIndex[[]A](pred)
} }
@@ -102,7 +102,7 @@ func FindLastWithIndex[A any](pred func(int, A) bool) func([]A) O.Option[A] {
// This combines finding and mapping in a single operation, searching from the end. // This combines finding and mapping in a single operation, searching from the end.
// //
//go:inline //go:inline
func FindLastMap[A, B any](sel func(A) O.Option[B]) func([]A) O.Option[B] { func FindLastMap[A, B any](sel option.Kleisli[A, B]) option.Kleisli[[]A, B] {
return G.FindLastMap[[]A](sel) return G.FindLastMap[[]A](sel)
} }
@@ -110,6 +110,6 @@ func FindLastMap[A, B any](sel func(A) O.Option[B]) func([]A) O.Option[B] {
// The selector receives both the index and the element, searching from the end. // The selector receives both the index and the element, searching from the end.
// //
//go:inline //go:inline
func FindLastMapWithIndex[A, B any](sel func(int, A) O.Option[B]) func([]A) O.Option[B] { func FindLastMapWithIndex[A, B any](sel func(int, A) Option[B]) option.Kleisli[[]A, B] {
return G.FindLastMapWithIndex[[]A](sel) return G.FindLastMapWithIndex[[]A](sel)
} }
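The Find* helpers now return option.Kleisli[[]A, A]. Assuming option.Kleisli[A, B] aliases func(A) Option[B], mirroring the array Kleisli alias added in v2/array/types.go, the result is still the familiar function from a slice to an Option. A minimal sketch:

	package main

	import (
		"fmt"

		A "github.com/IBM/fp-go/v2/array"
	)

	func main() {
		// findFirstEven has type option.Kleisli[[]int, int],
		// i.e. func([]int) option.Option[int]
		findFirstEven := A.FindFirst(func(n int) bool { return n%2 == 0 })

		fmt.Println(findFirstEven([]int{1, 3, 4, 5})) // -> Some(4)
		fmt.Println(findFirstEven([]int{1, 3, 5}))    // -> None
	}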

View File

@@ -25,8 +25,10 @@ import (
) )
// Of constructs a single element array // Of constructs a single element array
//
//go:inline
func Of[GA ~[]A, A any](value A) GA { func Of[GA ~[]A, A any](value A) GA {
return GA{value} return array.Of[GA](value)
} }
func Reduce[GA ~[]A, A, B any](f func(B, A) B, initial B) func(GA) B { func Reduce[GA ~[]A, A, B any](f func(B, A) B, initial B) func(GA) B {
@@ -82,7 +84,7 @@ func MakeBy[AS ~[]A, F ~func(int) A, A any](n int, f F) AS {
} }
// run the generator function across the input // run the generator function across the input
as := make(AS, n) as := make(AS, n)
for i := n - 1; i >= 0; i-- { for i := range n {
as[i] = f(i) as[i] = f(i)
} }
return as return as
@@ -138,22 +140,27 @@ func Empty[GA ~[]A, A any]() GA {
return array.Empty[GA]() return array.Empty[GA]()
} }
//go:inline
func UpsertAt[GA ~[]A, A any](a A) func(GA) GA { func UpsertAt[GA ~[]A, A any](a A) func(GA) GA {
return array.UpsertAt[GA](a) return array.UpsertAt[GA](a)
} }
//go:inline
func MonadMap[GA ~[]A, GB ~[]B, A, B any](as GA, f func(a A) B) GB { func MonadMap[GA ~[]A, GB ~[]B, A, B any](as GA, f func(a A) B) GB {
return array.MonadMap[GA, GB](as, f) return array.MonadMap[GA, GB](as, f)
} }
//go:inline
func Map[GA ~[]A, GB ~[]B, A, B any](f func(a A) B) func(GA) GB { func Map[GA ~[]A, GB ~[]B, A, B any](f func(a A) B) func(GA) GB {
return array.Map[GA, GB](f) return array.Map[GA, GB](f)
} }
//go:inline
func MonadMapWithIndex[GA ~[]A, GB ~[]B, A, B any](as GA, f func(int, A) B) GB { func MonadMapWithIndex[GA ~[]A, GB ~[]B, A, B any](as GA, f func(int, A) B) GB {
return array.MonadMapWithIndex[GA, GB](as, f) return array.MonadMapWithIndex[GA, GB](as, f)
} }
//go:inline
func MapWithIndex[GA ~[]A, GB ~[]B, A, B any](f func(int, A) B) func(GA) GB { func MapWithIndex[GA ~[]A, GB ~[]B, A, B any](f func(int, A) B) func(GA) GB {
return F.Bind2nd(MonadMapWithIndex[GA, GB, A, B], f) return F.Bind2nd(MonadMapWithIndex[GA, GB, A, B], f)
} }
@@ -165,10 +172,9 @@ func Size[GA ~[]A, A any](as GA) int {
func filterMap[GA ~[]A, GB ~[]B, A, B any](fa GA, f func(A) O.Option[B]) GB { func filterMap[GA ~[]A, GB ~[]B, A, B any](fa GA, f func(A) O.Option[B]) GB {
result := make(GB, 0, len(fa)) result := make(GB, 0, len(fa))
for _, a := range fa { for _, a := range fa {
O.Map(func(b B) B { if b, ok := O.Unwrap(f(a)); ok {
result = append(result, b) result = append(result, b)
return b }
})(f(a))
} }
return result return result
} }
@@ -176,10 +182,9 @@ func filterMap[GA ~[]A, GB ~[]B, A, B any](fa GA, f func(A) O.Option[B]) GB {
func filterMapWithIndex[GA ~[]A, GB ~[]B, A, B any](fa GA, f func(int, A) O.Option[B]) GB { func filterMapWithIndex[GA ~[]A, GB ~[]B, A, B any](fa GA, f func(int, A) O.Option[B]) GB {
result := make(GB, 0, len(fa)) result := make(GB, 0, len(fa))
for i, a := range fa { for i, a := range fa {
O.Map(func(b B) B { if b, ok := O.Unwrap(f(i, a)); ok {
result = append(result, b) result = append(result, b)
return b }
})(f(i, a))
} }
return result return result
} }
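The filterMap helpers above now use option.Unwrap, which (as the new code shows) returns the contained value together with an ok flag, instead of running O.Map purely for its side effect. A minimal sketch of the pattern:

	package main

	import (
		"fmt"

		O "github.com/IBM/fp-go/v2/option"
	)

	func main() {
		// Unwrap behaves like a comma-ok map lookup on an Option
		if b, ok := O.Unwrap(O.Some(42)); ok {
			fmt.Println(b) // 42
		}
		if _, ok := O.Unwrap(O.None[int]()); !ok {
			fmt.Println("none") // none
		}
	}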
@@ -297,7 +302,7 @@ func MatchLeft[AS ~[]A, A, B any](onEmpty func() B, onNonEmpty func(A, AS) B) fu
} }
//go:inline //go:inline
func Slice[AS ~[]A, A any](start int, end int) func(AS) AS { func Slice[AS ~[]A, A any](start, end int) func(AS) AS {
return array.Slice[AS](start, end) return array.Slice[AS](start, end)
} }
@@ -361,6 +366,12 @@ func Flap[FAB ~func(A) B, GFAB ~[]FAB, GB ~[]B, A, B any](a A) func(GFAB) GB {
return FC.Flap(Map[GFAB, GB], a) return FC.Flap(Map[GFAB, GB], a)
} }
//go:inline
func Prepend[ENDO ~func(AS) AS, AS []A, A any](head A) ENDO { func Prepend[ENDO ~func(AS) AS, AS []A, A any](head A) ENDO {
return array.Prepend[ENDO](head) return array.Prepend[ENDO](head)
} }
//go:inline
func Reverse[GT ~[]T, T any](as GT) GT {
return array.Reverse(as)
}
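The generic Reverse keeps the concrete slice type, which is the point of the ~[]T constraint. A minimal sketch; the IDs type is made up for illustration:

	package main

	import (
		"fmt"

		G "github.com/IBM/fp-go/v2/array/generic"
	)

	type IDs []int

	func main() {
		rev := G.Reverse(IDs{1, 2, 3})
		fmt.Printf("%T %v\n", rev, rev) // main.IDs [3 2 1]
	}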

View File

@@ -42,8 +42,7 @@ func FindFirst[AS ~[]A, PRED ~func(A) bool, A any](pred PRED) func(AS) O.Option[
func FindFirstMapWithIndex[AS ~[]A, PRED ~func(int, A) O.Option[B], A, B any](pred PRED) func(AS) O.Option[B] { func FindFirstMapWithIndex[AS ~[]A, PRED ~func(int, A) O.Option[B], A, B any](pred PRED) func(AS) O.Option[B] {
none := O.None[B]() none := O.None[B]()
return func(as AS) O.Option[B] { return func(as AS) O.Option[B] {
count := len(as) for i := range len(as) {
for i := 0; i < count; i++ {
out := pred(i, as[i]) out := pred(i, as[i])
if O.IsSome(out) { if O.IsSome(out) {
return out return out

View File

@@ -0,0 +1,34 @@
package generic
import (
"github.com/IBM/fp-go/v2/internal/array"
M "github.com/IBM/fp-go/v2/monoid"
S "github.com/IBM/fp-go/v2/semigroup"
)
// Monoid returns a Monoid instance for arrays.
// The Monoid combines arrays through concatenation, with an empty array as the identity element.
//
// Example:
//
// m := array.Monoid[int]()
// result := m.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4]
// empty := m.Empty() // []
//
//go:inline
func Monoid[GT ~[]T, T any]() M.Monoid[GT] {
return M.MakeMonoid(array.Concat[GT], Empty[GT]())
}
// Semigroup returns a Semigroup instance for arrays.
// The Semigroup combines arrays through concatenation.
//
// Example:
//
// s := array.Semigroup[int]()
// result := s.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4]
//
//go:inline
func Semigroup[GT ~[]T, T any]() S.Semigroup[GT] {
return S.MakeSemigroup(array.Concat[GT])
}

View File

@@ -26,7 +26,7 @@ import (
func ZipWith[AS ~[]A, BS ~[]B, CS ~[]C, FCT ~func(A, B) C, A, B, C any](fa AS, fb BS, f FCT) CS { func ZipWith[AS ~[]A, BS ~[]B, CS ~[]C, FCT ~func(A, B) C, A, B, C any](fa AS, fb BS, f FCT) CS {
l := N.Min(len(fa), len(fb)) l := N.Min(len(fa), len(fb))
res := make(CS, l) res := make(CS, l)
for i := l - 1; i >= 0; i-- { for i := range l {
res[i] = f(fa[i], fb[i]) res[i] = f(fa[i], fb[i])
} }
return res return res
@@ -43,7 +43,7 @@ func Unzip[AS ~[]A, BS ~[]B, CS ~[]T.Tuple2[A, B], A, B any](cs CS) T.Tuple2[AS,
l := len(cs) l := len(cs)
as := make(AS, l) as := make(AS, l)
bs := make(BS, l) bs := make(BS, l)
for i := l - 1; i >= 0; i-- { for i := range l {
t := cs[i] t := cs[i]
as[i] = t.F1 as[i] = t.F1
bs[i] = t.F2 bs[i] = t.F2

View File

@@ -18,7 +18,6 @@ package array
import ( import (
"testing" "testing"
O "github.com/IBM/fp-go/v2/option"
OR "github.com/IBM/fp-go/v2/ord" OR "github.com/IBM/fp-go/v2/ord"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -103,39 +102,6 @@ func TestSortByKey(t *testing.T) {
assert.Equal(t, "Charlie", result[2].Name) assert.Equal(t, "Charlie", result[2].Name)
} }
func TestMonadTraverse(t *testing.T) {
result := MonadTraverse(
O.Of[[]int],
O.Map[[]int, func(int) []int],
O.Ap[[]int, int],
[]int{1, 3, 5},
func(n int) O.Option[int] {
if n%2 == 1 {
return O.Some(n * 2)
}
return O.None[int]()
},
)
assert.Equal(t, O.Some([]int{2, 6, 10}), result)
// Test with None case
result2 := MonadTraverse(
O.Of[[]int],
O.Map[[]int, func(int) []int],
O.Ap[[]int, int],
[]int{1, 2, 3},
func(n int) O.Option[int] {
if n%2 == 1 {
return O.Some(n * 2)
}
return O.None[int]()
},
)
assert.Equal(t, O.None[[]int](), result2)
}
func TestUniqByKey(t *testing.T) { func TestUniqByKey(t *testing.T) {
type Person struct { type Person struct {
Name string Name string

View File

@@ -16,27 +16,12 @@
package array package array
import ( import (
G "github.com/IBM/fp-go/v2/array/generic"
"github.com/IBM/fp-go/v2/internal/array" "github.com/IBM/fp-go/v2/internal/array"
M "github.com/IBM/fp-go/v2/monoid" M "github.com/IBM/fp-go/v2/monoid"
S "github.com/IBM/fp-go/v2/semigroup" S "github.com/IBM/fp-go/v2/semigroup"
) )
func concat[T any](left, right []T) []T {
// some performance checks
ll := len(left)
if ll == 0 {
return right
}
lr := len(right)
if lr == 0 {
return left
}
// need to copy
buf := make([]T, ll+lr)
copy(buf[copy(buf, left):], right)
return buf
}
// Monoid returns a Monoid instance for arrays. // Monoid returns a Monoid instance for arrays.
// The Monoid combines arrays through concatenation, with an empty array as the identity element. // The Monoid combines arrays through concatenation, with an empty array as the identity element.
// //
@@ -45,8 +30,10 @@ func concat[T any](left, right []T) []T {
// m := array.Monoid[int]() // m := array.Monoid[int]()
// result := m.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4] // result := m.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4]
// empty := m.Empty() // [] // empty := m.Empty() // []
//
//go:inline
func Monoid[T any]() M.Monoid[[]T] { func Monoid[T any]() M.Monoid[[]T] {
return M.MakeMonoid(concat[T], Empty[T]()) return G.Monoid[[]T]()
} }
// Semigroup returns a Semigroup instance for arrays. // Semigroup returns a Semigroup instance for arrays.
@@ -56,8 +43,10 @@ func Monoid[T any]() M.Monoid[[]T] {
// //
// s := array.Semigroup[int]() // s := array.Semigroup[int]()
// result := s.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4] // result := s.Concat([]int{1, 2}, []int{3, 4}) // [1, 2, 3, 4]
//
//go:inline
func Semigroup[T any]() S.Semigroup[[]T] { func Semigroup[T any]() S.Semigroup[[]T] {
return S.MakeSemigroup(concat[T]) return G.Semigroup[[]T]()
} }
func addLen[A any](count int, data []A) int { func addLen[A any](count int, data []A) int {

View File

@@ -18,14 +18,11 @@ package nonempty
import ( import (
G "github.com/IBM/fp-go/v2/array/generic" G "github.com/IBM/fp-go/v2/array/generic"
EM "github.com/IBM/fp-go/v2/endomorphism" EM "github.com/IBM/fp-go/v2/endomorphism"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/array" "github.com/IBM/fp-go/v2/internal/array"
"github.com/IBM/fp-go/v2/option"
S "github.com/IBM/fp-go/v2/semigroup" S "github.com/IBM/fp-go/v2/semigroup"
) )
// NonEmptyArray represents an array with at least one element
type NonEmptyArray[A any] []A
// Of constructs a single element array // Of constructs a single element array
func Of[A any](first A) NonEmptyArray[A] { func Of[A any](first A) NonEmptyArray[A] {
return G.Of[NonEmptyArray[A]](first) return G.Of[NonEmptyArray[A]](first)
@@ -44,20 +41,24 @@ func From[A any](first A, data ...A) NonEmptyArray[A] {
return buffer return buffer
} }
//go:inline
func IsEmpty[A any](_ NonEmptyArray[A]) bool { func IsEmpty[A any](_ NonEmptyArray[A]) bool {
return false return false
} }
//go:inline
func IsNonEmpty[A any](_ NonEmptyArray[A]) bool { func IsNonEmpty[A any](_ NonEmptyArray[A]) bool {
return true return true
} }
//go:inline
func MonadMap[A, B any](as NonEmptyArray[A], f func(a A) B) NonEmptyArray[B] { func MonadMap[A, B any](as NonEmptyArray[A], f func(a A) B) NonEmptyArray[B] {
return G.MonadMap[NonEmptyArray[A], NonEmptyArray[B]](as, f) return G.MonadMap[NonEmptyArray[A], NonEmptyArray[B]](as, f)
} }
func Map[A, B any](f func(a A) B) func(NonEmptyArray[A]) NonEmptyArray[B] { //go:inline
return F.Bind2nd(MonadMap[A, B], f) func Map[A, B any](f func(a A) B) Operator[A, B] {
return G.Map[NonEmptyArray[A], NonEmptyArray[B]](f)
} }
func Reduce[A, B any](f func(B, A) B, initial B) func(NonEmptyArray[A]) B { func Reduce[A, B any](f func(B, A) B, initial B) func(NonEmptyArray[A]) B {
@@ -72,22 +73,27 @@ func ReduceRight[A, B any](f func(A, B) B, initial B) func(NonEmptyArray[A]) B {
} }
} }
//go:inline
func Tail[A any](as NonEmptyArray[A]) []A { func Tail[A any](as NonEmptyArray[A]) []A {
return as[1:] return as[1:]
} }
//go:inline
func Head[A any](as NonEmptyArray[A]) A { func Head[A any](as NonEmptyArray[A]) A {
return as[0] return as[0]
} }
//go:inline
func First[A any](as NonEmptyArray[A]) A { func First[A any](as NonEmptyArray[A]) A {
return as[0] return as[0]
} }
//go:inline
func Last[A any](as NonEmptyArray[A]) A { func Last[A any](as NonEmptyArray[A]) A {
return as[len(as)-1] return as[len(as)-1]
} }
//go:inline
func Size[A any](as NonEmptyArray[A]) int { func Size[A any](as NonEmptyArray[A]) int {
return G.Size(as) return G.Size(as)
} }
@@ -96,11 +102,11 @@ func Flatten[A any](mma NonEmptyArray[NonEmptyArray[A]]) NonEmptyArray[A] {
return G.Flatten(mma) return G.Flatten(mma)
} }
func MonadChain[A, B any](fa NonEmptyArray[A], f func(a A) NonEmptyArray[B]) NonEmptyArray[B] { func MonadChain[A, B any](fa NonEmptyArray[A], f Kleisli[A, B]) NonEmptyArray[B] {
return G.MonadChain(fa, f) return G.MonadChain(fa, f)
} }
func Chain[A, B any](f func(A) NonEmptyArray[B]) func(NonEmptyArray[A]) NonEmptyArray[B] { func Chain[A, B any](f func(A) NonEmptyArray[B]) Operator[A, B] {
return G.Chain[NonEmptyArray[A]](f) return G.Chain[NonEmptyArray[A]](f)
} }
@@ -134,3 +140,89 @@ func Fold[A any](s S.Semigroup[A]) func(NonEmptyArray[A]) A {
func Prepend[A any](head A) EM.Endomorphism[NonEmptyArray[A]] { func Prepend[A any](head A) EM.Endomorphism[NonEmptyArray[A]] {
return array.Prepend[EM.Endomorphism[NonEmptyArray[A]]](head) return array.Prepend[EM.Endomorphism[NonEmptyArray[A]]](head)
} }
// ToNonEmptyArray attempts to convert a regular slice into a NonEmptyArray.
// This function provides a safe way to create a NonEmptyArray from a slice that might be empty,
// returning an Option type to handle the case where the input slice is empty.
//
// Type Parameters:
// - A: The element type of the array
//
// Parameters:
// - as: A regular slice that may or may not be empty
//
// Returns:
// - Option[NonEmptyArray[A]]: Some(NonEmptyArray) if the input slice is non-empty, None if empty
//
// Behavior:
// - If the input slice is empty, returns None
// - If the input slice has at least one element, wraps it in Some and returns it as a NonEmptyArray
// - The conversion is a direct type conversion, so no data is copied; the result shares the input slice's backing array
//
// Example:
//
// // Convert non-empty slice
// numbers := []int{1, 2, 3}
// result := ToNonEmptyArray(numbers) // Some(NonEmptyArray[1, 2, 3])
//
// // Convert empty slice
// empty := []int{}
// result := ToNonEmptyArray(empty) // None
//
// // Use with Option methods
// numbers := []int{1, 2, 3}
// result := ToNonEmptyArray(numbers)
// if O.IsSome(result) {
// nea := O.GetOrElse(F.Constant(From(0)))(result)
// head := Head(nea) // 1
// }
//
// Use cases:
// - Safely converting user input or external data to NonEmptyArray
// - Validating that a collection has at least one element before processing
// - Converting results from functions that return regular slices
// - Ensuring type safety when working with collections that must not be empty
//
// Example with validation:
//
// func processItems(items []string) Option[string] {
// return F.Pipe2(
// items,
// ToNonEmptyArray[string],
// O.Map(func(nea NonEmptyArray[string]) string {
// return Head(nea) // Safe to get head since we know it's non-empty
// }),
// )
// }
//
// Example with error handling:
//
// items := []int{1, 2, 3}
// result := ToNonEmptyArray(items)
// switch {
// case O.IsSome(result):
// nea := O.GetOrElse(F.Constant(From(0)))(result)
// fmt.Println("First item:", Head(nea))
// case O.IsNone(result):
// fmt.Println("Array is empty")
// }
//
// Example with chaining:
//
// // Process only if non-empty
// result := F.Pipe3(
// []int{1, 2, 3},
// ToNonEmptyArray[int],
// O.Map(Map(func(x int) int { return x * 2 })),
// O.Map(Head[int]),
// ) // Some(2)
//
// Note: This function is particularly useful when working with APIs or functions
// that return regular slices and you need the type-level guarantee that the
// collection is non-empty for subsequent operations.
func ToNonEmptyArray[A any](as []A) Option[NonEmptyArray[A]] {
if G.IsEmpty(as) {
return option.None[NonEmptyArray[A]]()
}
return option.Some(NonEmptyArray[A](as))
}

View File

@@ -0,0 +1,370 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package nonempty
import (
"testing"
F "github.com/IBM/fp-go/v2/function"
O "github.com/IBM/fp-go/v2/option"
"github.com/stretchr/testify/assert"
)
// TestToNonEmptyArray tests the ToNonEmptyArray function
func TestToNonEmptyArray(t *testing.T) {
t.Run("Convert non-empty slice of integers", func(t *testing.T) {
input := []int{1, 2, 3}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0)))(result)
assert.Equal(t, 3, Size(nea))
assert.Equal(t, 1, Head(nea))
assert.Equal(t, 3, Last(nea))
})
t.Run("Convert empty slice returns None", func(t *testing.T) {
input := []int{}
result := ToNonEmptyArray(input)
assert.True(t, O.IsNone(result))
})
t.Run("Convert single element slice", func(t *testing.T) {
input := []string{"hello"}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From("")))(result)
assert.Equal(t, 1, Size(nea))
assert.Equal(t, "hello", Head(nea))
})
t.Run("Convert non-empty slice of strings", func(t *testing.T) {
input := []string{"a", "b", "c", "d"}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From("")))(result)
assert.Equal(t, 4, Size(nea))
assert.Equal(t, "a", Head(nea))
assert.Equal(t, "d", Last(nea))
})
t.Run("Convert nil slice returns None", func(t *testing.T) {
var input []int
result := ToNonEmptyArray(input)
assert.True(t, O.IsNone(result))
})
t.Run("Convert slice with struct elements", func(t *testing.T) {
type Person struct {
Name string
Age int
}
input := []Person{
{Name: "Alice", Age: 30},
{Name: "Bob", Age: 25},
}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(Person{})))(result)
assert.Equal(t, 2, Size(nea))
assert.Equal(t, "Alice", Head(nea).Name)
})
t.Run("Convert slice with pointer elements", func(t *testing.T) {
val1, val2 := 10, 20
input := []*int{&val1, &val2}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From[*int](nil)))(result)
assert.Equal(t, 2, Size(nea))
assert.Equal(t, 10, *Head(nea))
})
t.Run("Convert large slice", func(t *testing.T) {
input := make([]int, 1000)
for i := range input {
input[i] = i
}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0)))(result)
assert.Equal(t, 1000, Size(nea))
assert.Equal(t, 0, Head(nea))
assert.Equal(t, 999, Last(nea))
})
t.Run("Convert slice with float64 elements", func(t *testing.T) {
input := []float64{1.5, 2.5, 3.5}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0.0)))(result)
assert.Equal(t, 3, Size(nea))
assert.Equal(t, 1.5, Head(nea))
})
t.Run("Convert slice with boolean elements", func(t *testing.T) {
input := []bool{true, false, true}
result := ToNonEmptyArray(input)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(false)))(result)
assert.Equal(t, 3, Size(nea))
assert.True(t, Head(nea))
})
}
// TestToNonEmptyArrayWithOption tests ToNonEmptyArray with Option operations
func TestToNonEmptyArrayWithOption(t *testing.T) {
t.Run("Chain with Map to process elements", func(t *testing.T) {
input := []int{1, 2, 3}
result := F.Pipe2(
input,
ToNonEmptyArray[int],
O.Map(Map(func(x int) int { return x * 2 })),
)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0)))(result)
assert.Equal(t, 2, Head(nea))
assert.Equal(t, 6, Last(nea))
})
t.Run("Chain with Map to get head", func(t *testing.T) {
input := []string{"first", "second", "third"}
result := F.Pipe2(
input,
ToNonEmptyArray[string],
O.Map(Head[string]),
)
assert.True(t, O.IsSome(result))
value := O.GetOrElse(F.Constant(""))(result)
assert.Equal(t, "first", value)
})
t.Run("GetOrElse with default value for empty slice", func(t *testing.T) {
input := []int{}
defaultValue := From(42)
result := F.Pipe2(
input,
ToNonEmptyArray[int],
O.GetOrElse(F.Constant(defaultValue)),
)
assert.Equal(t, 1, Size(result))
assert.Equal(t, 42, Head(result))
})
t.Run("GetOrElse with default value for non-empty slice", func(t *testing.T) {
input := []int{1, 2, 3}
defaultValue := From(42)
result := F.Pipe2(
input,
ToNonEmptyArray[int],
O.GetOrElse(F.Constant(defaultValue)),
)
assert.Equal(t, 3, Size(result))
assert.Equal(t, 1, Head(result))
})
t.Run("Fold with Some case", func(t *testing.T) {
input := []int{1, 2, 3}
result := F.Pipe2(
input,
ToNonEmptyArray[int],
O.Fold(
F.Constant(0),
func(nea NonEmptyArray[int]) int { return Head(nea) },
),
)
assert.Equal(t, 1, result)
})
t.Run("Fold with None case", func(t *testing.T) {
input := []int{}
result := F.Pipe2(
input,
ToNonEmptyArray[int],
O.Fold(
F.Constant(-1),
func(nea NonEmptyArray[int]) int { return Head(nea) },
),
)
assert.Equal(t, -1, result)
})
}
// TestToNonEmptyArrayComposition tests composing ToNonEmptyArray with other operations
func TestToNonEmptyArrayComposition(t *testing.T) {
t.Run("Compose with filter-like operation", func(t *testing.T) {
input := []int{1, 2, 3, 4, 5}
// Filter even numbers then convert
filtered := []int{}
for _, x := range input {
if x%2 == 0 {
filtered = append(filtered, x)
}
}
result := ToNonEmptyArray(filtered)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0)))(result)
assert.Equal(t, 2, Size(nea))
assert.Equal(t, 2, Head(nea))
})
t.Run("Compose with map operation before conversion", func(t *testing.T) {
input := []int{1, 2, 3}
// Map then convert
mapped := make([]int, len(input))
for i, x := range input {
mapped[i] = x * 10
}
result := ToNonEmptyArray(mapped)
assert.True(t, O.IsSome(result))
nea := O.GetOrElse(F.Constant(From(0)))(result)
assert.Equal(t, 10, Head(nea))
assert.Equal(t, 30, Last(nea))
})
t.Run("Chain multiple Option operations", func(t *testing.T) {
input := []int{5, 10, 15}
result := F.Pipe3(
input,
ToNonEmptyArray[int],
O.Map(Map(func(x int) int { return x / 5 })),
O.Map(func(nea NonEmptyArray[int]) int {
return Head(nea) + Last(nea)
}),
)
assert.True(t, O.IsSome(result))
value := O.GetOrElse(F.Constant(0))(result)
assert.Equal(t, 4, value) // 1 + 3
})
}
// TestToNonEmptyArrayUseCases demonstrates practical use cases
func TestToNonEmptyArrayUseCases(t *testing.T) {
t.Run("Validate user input has at least one item", func(t *testing.T) {
// Simulate user input
userInput := []string{"item1", "item2"}
result := ToNonEmptyArray(userInput)
if O.IsSome(result) {
nea := O.GetOrElse(F.Constant(From("")))(result)
firstItem := Head(nea)
assert.Equal(t, "item1", firstItem)
} else {
t.Fatal("Expected Some but got None")
}
})
t.Run("Process only non-empty collections", func(t *testing.T) {
processItems := func(items []int) Option[int] {
return F.Pipe2(
items,
ToNonEmptyArray[int],
O.Map(func(nea NonEmptyArray[int]) int {
// Safe to use Head since we know it's non-empty
return Head(nea) * 2
}),
)
}
result1 := processItems([]int{5, 10, 15})
assert.True(t, O.IsSome(result1))
assert.Equal(t, 10, O.GetOrElse(F.Constant(0))(result1))
result2 := processItems([]int{})
assert.True(t, O.IsNone(result2))
})
t.Run("Convert API response to NonEmptyArray", func(t *testing.T) {
// Simulate API response
type APIResponse struct {
Items []string
}
response := APIResponse{Items: []string{"data1", "data2", "data3"}}
result := F.Pipe2(
response.Items,
ToNonEmptyArray[string],
O.Map(func(nea NonEmptyArray[string]) string {
return "First item: " + Head(nea)
}),
)
assert.True(t, O.IsSome(result))
message := O.GetOrElse(F.Constant("No items"))(result)
assert.Equal(t, "First item: data1", message)
})
t.Run("Ensure collection is non-empty before processing", func(t *testing.T) {
calculateAverage := func(numbers []float64) Option[float64] {
return F.Pipe2(
numbers,
ToNonEmptyArray[float64],
O.Map(func(nea NonEmptyArray[float64]) float64 {
sum := 0.0
for _, n := range nea {
sum += n
}
return sum / float64(Size(nea))
}),
)
}
result1 := calculateAverage([]float64{10.0, 20.0, 30.0})
assert.True(t, O.IsSome(result1))
assert.Equal(t, 20.0, O.GetOrElse(F.Constant(0.0))(result1))
result2 := calculateAverage([]float64{})
assert.True(t, O.IsNone(result2))
})
t.Run("Safe head extraction with type guarantee", func(t *testing.T) {
getFirstOrDefault := func(items []string, defaultValue string) string {
return F.Pipe2(
items,
ToNonEmptyArray[string],
O.Fold(
F.Constant(defaultValue),
Head[string],
),
)
}
result1 := getFirstOrDefault([]string{"a", "b", "c"}, "default")
assert.Equal(t, "a", result1)
result2 := getFirstOrDefault([]string{}, "default")
assert.Equal(t, "default", result2)
})
}

View File

@@ -0,0 +1,15 @@
package nonempty
import "github.com/IBM/fp-go/v2/option"
type (
// NonEmptyArray represents an array with at least one element
NonEmptyArray[A any] []A
Kleisli[A, B any] = func(A) NonEmptyArray[B]
Operator[A, B any] = Kleisli[NonEmptyArray[A], B]
Option[A any] = option.Option[A]
)
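These aliases make the reworked nonempty signatures concrete: Operator[A, B] expands to func(NonEmptyArray[A]) NonEmptyArray[B]. A minimal sketch; the import path of the nonempty package is assumed here:

	package main

	import (
		"fmt"

		NE "github.com/IBM/fp-go/v2/array/nonempty"
	)

	func main() {
		// double has type nonempty.Operator[int, int],
		// i.e. func(NonEmptyArray[int]) NonEmptyArray[int]
		var double NE.Operator[int, int] = NE.Map(func(x int) int { return x * 2 })

		res := double(NE.From(1, 2, 3))
		fmt.Println(NE.Head(res), NE.Last(res)) // 2 6
	}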

View File

@@ -16,10 +16,18 @@
package array package array
import ( import (
F "github.com/IBM/fp-go/v2/function" "github.com/IBM/fp-go/v2/internal/array"
M "github.com/IBM/fp-go/v2/monoid"
O "github.com/IBM/fp-go/v2/option" O "github.com/IBM/fp-go/v2/option"
) )
func MonadSequence[HKTA, HKTRA any](
fof func(HKTA) HKTRA,
m M.Monoid[HKTRA],
ma []HKTA) HKTRA {
return array.MonadSequence(fof, m.Empty(), m.Concat, ma)
}
// Sequence takes an array where elements are HKT<A> (higher kinded type) and, // Sequence takes an array where elements are HKT<A> (higher kinded type) and,
// using an applicative of that HKT, returns an HKT of []A. // using an applicative of that HKT, returns an HKT of []A.
// //
@@ -55,16 +63,11 @@ import (
// option.MonadAp[[]int, int], // option.MonadAp[[]int, int],
// ) // )
// result := seq(opts) // Some([1, 2, 3]) // result := seq(opts) // Some([1, 2, 3])
func Sequence[A, HKTA, HKTRA, HKTFRA any]( func Sequence[HKTA, HKTRA any](
_of func([]A) HKTRA, fof func(HKTA) HKTRA,
_map func(HKTRA, func([]A) func(A) []A) HKTFRA, m M.Monoid[HKTRA],
_ap func(HKTFRA, HKTA) HKTRA,
) func([]HKTA) HKTRA { ) func([]HKTA) HKTRA {
ca := F.Curry2(Append[A]) return array.Sequence[[]HKTA](fof, m.Empty(), m.Concat)
empty := _of(Empty[A]())
return Reduce(func(fas HKTRA, fa HKTA) HKTRA {
return _ap(_map(fas, ca), fa)
}, empty)
} }
// ArrayOption returns a function to convert a sequence of options into an option of a sequence. // ArrayOption returns a function to convert a sequence of options into an option of a sequence.
@@ -86,10 +89,10 @@ func Sequence[A, HKTA, HKTRA, HKTFRA any](
// option.Some(3), // option.Some(3),
// } // }
// result2 := array.ArrayOption[int]()(opts2) // None // result2 := array.ArrayOption[int]()(opts2) // None
func ArrayOption[A any]() func([]O.Option[A]) O.Option[[]A] { func ArrayOption[A any](ma []Option[A]) Option[[]A] {
return Sequence( return MonadSequence(
O.Of[[]A], O.Map(Of[A]),
O.MonadMap[[]A, func(A) []A], O.ApplicativeMonoid(Monoid[A]()),
O.MonadAp[[]A, A], ma,
) )
} }

View File

@@ -24,8 +24,7 @@ import (
) )
func TestSequenceOption(t *testing.T) { func TestSequenceOption(t *testing.T) {
seq := ArrayOption[int]()
assert.Equal(t, O.Of([]int{1, 3}), seq([]O.Option[int]{O.Of(1), O.Of(3)})) assert.Equal(t, O.Of([]int{1, 3}), ArrayOption([]O.Option[int]{O.Of(1), O.Of(3)}))
assert.Equal(t, O.None[[]int](), seq([]O.Option[int]{O.Of(1), O.None[int]()})) assert.Equal(t, O.None[[]int](), ArrayOption([]O.Option[int]{O.Of(1), O.None[int]()}))
} }

View File

@@ -18,6 +18,7 @@ package array
import ( import (
"testing" "testing"
N "github.com/IBM/fp-go/v2/number"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -243,7 +244,7 @@ func TestSliceComposition(t *testing.T) {
t.Run("slice then map", func(t *testing.T) { t.Run("slice then map", func(t *testing.T) {
sliced := Slice[int](2, 5)(data) sliced := Slice[int](2, 5)(data)
mapped := Map(func(x int) int { return x * 2 })(sliced) mapped := Map(N.Mul(2))(sliced)
assert.Equal(t, []int{4, 6, 8}, mapped) assert.Equal(t, []int{4, 6, 8}, mapped)
}) })

View File

@@ -32,7 +32,7 @@ import (
// // Result: [1, 1, 2, 3, 4, 5, 6, 9] // // Result: [1, 1, 2, 3, 4, 5, 6, 9]
// //
//go:inline //go:inline
func Sort[T any](ord O.Ord[T]) func(ma []T) []T { func Sort[T any](ord O.Ord[T]) Operator[T, T] {
return G.Sort[[]T](ord) return G.Sort[[]T](ord)
} }
@@ -62,7 +62,7 @@ func Sort[T any](ord O.Ord[T]) func(ma []T) []T {
// // Result: [{"Bob", 25}, {"Alice", 30}, {"Charlie", 35}] // // Result: [{"Bob", 25}, {"Alice", 30}, {"Charlie", 35}]
// //
//go:inline //go:inline
func SortByKey[K, T any](ord O.Ord[K], f func(T) K) func(ma []T) []T { func SortByKey[K, T any](ord O.Ord[K], f func(T) K) Operator[T, T] {
return G.SortByKey[[]T](ord, f) return G.SortByKey[[]T](ord, f)
} }
@@ -93,6 +93,6 @@ func SortByKey[K, T any](ord O.Ord[K], f func(T) K) func(ma []T) []T {
// // Result: [{"Jones", "Bob"}, {"Smith", "Alice"}, {"Smith", "John"}] // // Result: [{"Jones", "Bob"}, {"Smith", "Alice"}, {"Smith", "John"}]
// //
//go:inline //go:inline
func SortBy[T any](ord []O.Ord[T]) func(ma []T) []T { func SortBy[T any](ord []O.Ord[T]) Operator[T, T] {
return G.SortBy[[]T](ord) return G.SortBy[[]T](ord)
} }

View File

@@ -80,3 +80,25 @@ func MonadTraverse[A, B, HKTB, HKTAB, HKTRB any](
return array.MonadTraverse(fof, fmap, fap, ta, f) return array.MonadTraverse(fof, fmap, fap, ta, f)
} }
//go:inline
func TraverseWithIndex[A, B, HKTB, HKTAB, HKTRB any](
fof func([]B) HKTRB,
fmap func(func([]B) func(B) []B) func(HKTRB) HKTAB,
fap func(HKTB) func(HKTAB) HKTRB,
f func(int, A) HKTB) func([]A) HKTRB {
return array.TraverseWithIndex[[]A](fof, fmap, fap, f)
}
//go:inline
func MonadTraverseWithIndex[A, B, HKTB, HKTAB, HKTRB any](
fof func([]B) HKTRB,
fmap func(func([]B) func(B) []B) func(HKTRB) HKTAB,
fap func(HKTB) func(HKTAB) HKTRB,
ta []A,
f func(int, A) HKTB) HKTRB {
return array.MonadTraverseWithIndex(fof, fmap, fap, ta, f)
}
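The new TraverseWithIndex keeps the HKT-style parameters of the existing traversals. A minimal sketch instantiating it for Option, mirroring the curried option.Map and option.Ap arguments used by the MonadTraverse test removed earlier in this compare; the exact type arguments are an assumption based on that test:

	package main

	import (
		"fmt"

		A "github.com/IBM/fp-go/v2/array"
		O "github.com/IBM/fp-go/v2/option"
	)

	func main() {
		// prefix every element with its index, failing the whole traversal on ""
		traverse := A.TraverseWithIndex(
			O.Of[[]string],
			O.Map[[]string, func(string) []string],
			O.Ap[[]string, string],
			func(i int, s string) O.Option[string] {
				if s == "" {
					return O.None[string]()
				}
				return O.Some(fmt.Sprintf("%d:%s", i, s))
			},
		)

		fmt.Println(traverse([]string{"a", "b"})) // -> Some([0:a 1:b])
		fmt.Println(traverse([]string{"a", ""}))  // -> None
	}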

v2/array/types.go (new file, 9 lines)
View File

@@ -0,0 +1,9 @@
package array
import "github.com/IBM/fp-go/v2/option"
type (
Kleisli[A, B any] = func(A) []B
Operator[A, B any] = Kleisli[[]A, B]
Option[A any] = option.Option[A]
)
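These aliases underpin most of the signature changes in this compare: Kleisli[A, B] is func(A) []B and Operator[A, B] is func([]A) []B, so the reworked combinators still produce plain slice functions. A minimal sketch:

	package main

	import (
		"fmt"

		A "github.com/IBM/fp-go/v2/array"
	)

	func main() {
		// Operator[int, string] is just func([]int) []string
		var toStrings A.Operator[int, string] = A.Map(func(n int) string { return fmt.Sprint(n) })
		fmt.Println(toStrings([]int{1, 2, 3})) // [1 2 3]

		// Kleisli[int, int] is just func(int) []int
		var pair A.Kleisli[int, int] = func(n int) []int { return []int{n, n} }
		fmt.Println(pair(7)) // [7 7]
	}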

View File

@@ -46,6 +46,6 @@ func StrictUniq[A comparable](as []A) []A {
// // Result: [{"Alice", 30}, {"Bob", 25}, {"Charlie", 30}] // // Result: [{"Alice", 30}, {"Bob", 25}, {"Charlie", 30}]
// //
//go:inline //go:inline
func Uniq[A any, K comparable](f func(A) K) func(as []A) []A { func Uniq[A any, K comparable](f func(A) K) Operator[A, A] {
return G.Uniq[[]A](f) return G.Uniq[[]A](f)
} }

v2/assert/assert.go (new file, 710 lines)
View File

@@ -0,0 +1,710 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package assert provides functional assertion helpers for testing.
//
// This package wraps testify/assert functions in a Reader monad pattern,
// allowing for composable and functional test assertions. Each assertion
// returns a Reader that takes a *testing.T and performs the assertion.
//
// # Data Last Principle
//
// This package follows the "data last" functional programming principle, where
// the data being operated on comes as the last parameter in a chain of function
// applications. This design enables several powerful functional programming patterns:
//
// 1. **Partial Application**: You can create reusable assertion functions by providing
// configuration parameters first, leaving the data and testing context for later.
//
// 2. **Function Composition**: Assertions can be composed and combined before being
// applied to actual data.
//
// 3. **Point-Free Style**: You can pass assertion functions around without immediately
// providing the data they operate on.
//
// The general pattern is:
//
// assert.Function(config)(data)(testingContext)
// (config = expected value, data = actual value, testingContext = *testing.T, always last)
//
// For single-parameter assertions:
//
// assert.Function(data)(testingContext)
// (data = actual value, testingContext = *testing.T, always last)
//
// Examples of "data last" in action:
//
// // Multi-parameter: expected value → actual value → testing context
// assert.Equal(42)(result)(t)
// assert.ArrayContains(3)(numbers)(t)
//
// // Single-parameter: data → testing context
// assert.NoError(err)(t)
// assert.ArrayNotEmpty(arr)(t)
//
// // Partial application - create reusable assertions
// isPositive := assert.That(func(n int) bool { return n > 0 })
// // Later, apply to different values:
// isPositive(42)(t) // Passes
// isPositive(-5)(t) // Fails
//
// // Composition - combine assertions before applying data
// validateUser := func(u User) assert.Reader {
// return assert.AllOf([]assert.Reader{
// assert.Equal("Alice")(u.Name),
// assert.That(func(age int) bool { return age >= 18 })(u.Age),
// })
// }
// validateUser(user)(t)
//
// The package supports:
// - Equality and inequality assertions
// - Collection assertions (arrays, maps, strings)
// - Error handling assertions
// - Result type assertions
// - Custom predicate assertions
// - Composable test suites
//
// Example:
//
// func TestExample(t *testing.T) {
// value := 42
// assert.Equal(42)(value)(t) // Curried style
//
// // Composing multiple assertions
// arr := []int{1, 2, 3}
// assertions := assert.AllOf([]assert.Reader{
// assert.ArrayNotEmpty(arr),
// assert.ArrayLength[int](3)(arr),
// assert.ArrayContains(2)(arr),
// })
// assertions(t)
// }
package assert
import (
"fmt"
"testing"
"github.com/IBM/fp-go/v2/boolean"
"github.com/IBM/fp-go/v2/eq"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
var (
// Eq is the equal predicate checking if objects are equal
Eq = eq.FromEquals(assert.ObjectsAreEqual)
)
// wrap1 is an internal helper function that wraps testify assertion functions
// into the Reader monad pattern with curried parameters.
//
// It takes a testify assertion function and converts it into a curried function
// that first takes an expected value, then an actual value, and finally returns
// a Reader that performs the assertion when given a *testing.T.
//
// Parameters:
// - wrapped: The testify assertion function to wrap
// - expected: The expected value for comparison
// - msgAndArgs: Optional message and arguments for assertion failure
//
// Returns:
// - A Kleisli function that takes the actual value and returns a Reader
func wrap1[T any](wrapped func(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool, expected T, msgAndArgs ...any) Kleisli[T] {
return func(actual T) Reader {
return func(t *testing.T) bool {
return wrapped(t, expected, actual, msgAndArgs...)
}
}
}
// NotEqual tests if the expected and the actual values are not equal.
//
// This function follows the "data last" principle - you provide the expected value first,
// then the actual value, and finally the testing.T context.
//
// Example:
//
// func TestNotEqual(t *testing.T) {
// value := 42
// assert.NotEqual(10)(value)(t) // Passes: 42 != 10
// assert.NotEqual(42)(value)(t) // Fails: 42 == 42
// }
func NotEqual[T any](expected T) Kleisli[T] {
return wrap1(assert.NotEqual, expected)
}
// Equal tests if the expected and the actual values are equal.
//
// This is one of the most commonly used assertions. It follows the "data last" principle -
// you provide the expected value first, then the actual value, and finally the testing.T context.
//
// Example:
//
// func TestEqual(t *testing.T) {
// result := 2 + 2
// assert.Equal(4)(result)(t) // Passes
//
// name := "Alice"
// assert.Equal("Alice")(name)(t) // Passes
//
// // Can be composed with other assertions
// user := User{Name: "Bob", Age: 30}
// assertions := assert.AllOf([]assert.Reader{
// assert.Equal("Bob")(user.Name),
// assert.Equal(30)(user.Age),
// })
// assertions(t)
// }
func Equal[T any](expected T) Kleisli[T] {
return wrap1(assert.Equal, expected)
}
// ArrayNotEmpty checks if an array is not empty.
//
// Example:
//
// func TestArrayNotEmpty(t *testing.T) {
// numbers := []int{1, 2, 3}
// assert.ArrayNotEmpty(numbers)(t) // Passes
//
// empty := []int{}
// assert.ArrayNotEmpty(empty)(t) // Fails
// }
func ArrayNotEmpty[T any](arr []T) Reader {
return func(t *testing.T) bool {
return assert.NotEmpty(t, arr)
}
}
// RecordNotEmpty checks if a map is not empty.
//
// Example:
//
// func TestRecordNotEmpty(t *testing.T) {
// config := map[string]int{"timeout": 30, "retries": 3}
// assert.RecordNotEmpty(config)(t) // Passes
//
// empty := map[string]int{}
// assert.RecordNotEmpty(empty)(t) // Fails
// }
func RecordNotEmpty[K comparable, T any](mp map[K]T) Reader {
return func(t *testing.T) bool {
return assert.NotEmpty(t, mp)
}
}
// StringNotEmpty checks if a string is not empty.
//
// Example:
//
// func TestStringNotEmpty(t *testing.T) {
// message := "Hello, World!"
// assert.StringNotEmpty(message)(t) // Passes
//
// empty := ""
// assert.StringNotEmpty(empty)(t) // Fails
// }
func StringNotEmpty(s string) Reader {
return func(t *testing.T) bool {
return assert.NotEmpty(t, s)
}
}
// ArrayLength tests if an array has the expected length.
//
// Example:
//
// func TestArrayLength(t *testing.T) {
// numbers := []int{1, 2, 3, 4, 5}
// assert.ArrayLength[int](5)(numbers)(t) // Passes
// assert.ArrayLength[int](3)(numbers)(t) // Fails
// }
func ArrayLength[T any](expected int) Kleisli[[]T] {
return func(actual []T) Reader {
return func(t *testing.T) bool {
return assert.Len(t, actual, expected)
}
}
}
// RecordLength tests if a map has the expected length.
//
// Example:
//
// func TestRecordLength(t *testing.T) {
// config := map[string]string{"host": "localhost", "port": "8080"}
// assert.RecordLength[string, string](2)(config)(t) // Passes
// assert.RecordLength[string, string](3)(config)(t) // Fails
// }
func RecordLength[K comparable, T any](expected int) Kleisli[map[K]T] {
return func(actual map[K]T) Reader {
return func(t *testing.T) bool {
return assert.Len(t, actual, expected)
}
}
}
// StringLength tests if a string has the expected length.
//
// Example:
//
// func TestStringLength(t *testing.T) {
// message := "Hello"
// assert.StringLength[any, any](5)(message)(t) // Passes
// assert.StringLength[any, any](10)(message)(t) // Fails
// }
func StringLength[K comparable, T any](expected int) Kleisli[string] {
return func(actual string) Reader {
return func(t *testing.T) bool {
return assert.Len(t, actual, expected)
}
}
}
// NoError validates that there is no error.
//
// This is commonly used to assert that operations complete successfully.
//
// Example:
//
// func TestNoError(t *testing.T) {
// err := doSomething()
// assert.NoError(err)(t) // Passes if err is nil
//
// // Can be used with result types
// result := result.TryCatch(func() (int, error) {
// return 42, nil
// })
// assert.Success(result)(t) // Uses NoError internally
// }
func NoError(err error) Reader {
return func(t *testing.T) bool {
return assert.NoError(t, err)
}
}
// Error validates that there is an error.
//
// This is used to assert that operations fail as expected.
//
// Example:
//
// func TestError(t *testing.T) {
// err := validateInput("")
// assert.Error(err)(t) // Passes if err is not nil
//
// err2 := validateInput("valid")
// assert.Error(err2)(t) // Fails if err2 is nil
// }
func Error(err error) Reader {
return func(t *testing.T) bool {
return assert.Error(t, err)
}
}
// Success checks if a [Result] represents success.
//
// This is a convenience function for testing Result types from the fp-go library.
//
// Example:
//
// func TestSuccess(t *testing.T) {
// res := result.Of[int](42)
// assert.Success(res)(t) // Passes
//
// failedRes := result.Error[int](errors.New("failed"))
// assert.Success(failedRes)(t) // Fails
// }
func Success[T any](res Result[T]) Reader {
return NoError(result.ToError(res))
}
// Failure checks if a [Result] represents failure.
//
// This is a convenience function for testing Result types from the fp-go library.
//
// Example:
//
// func TestFailure(t *testing.T) {
// res := result.Error[int](errors.New("something went wrong"))
// assert.Failure(res)(t) // Passes
//
// successRes := result.Of[int](42)
// assert.Failure(successRes)(t) // Fails
// }
func Failure[T any](res Result[T]) Reader {
return Error(result.ToError(res))
}
// ArrayContains tests if a value is contained in an array.
//
// Example:
//
// func TestArrayContains(t *testing.T) {
// numbers := []int{1, 2, 3, 4, 5}
// assert.ArrayContains(3)(numbers)(t) // Passes
// assert.ArrayContains(10)(numbers)(t) // Fails
//
// names := []string{"Alice", "Bob", "Charlie"}
// assert.ArrayContains("Bob")(names)(t) // Passes
// }
func ArrayContains[T any](expected T) Kleisli[[]T] {
return func(actual []T) Reader {
return func(t *testing.T) bool {
return assert.Contains(t, actual, expected)
}
}
}
// ContainsKey tests if a key is contained in a map.
//
// Example:
//
// func TestContainsKey(t *testing.T) {
// config := map[string]int{"timeout": 30, "retries": 3}
// assert.ContainsKey[int]("timeout")(config)(t) // Passes
// assert.ContainsKey[int]("maxSize")(config)(t) // Fails
// }
func ContainsKey[T any, K comparable](expected K) Kleisli[map[K]T] {
return func(actual map[K]T) Reader {
return func(t *testing.T) bool {
return assert.Contains(t, actual, expected)
}
}
}
// NotContainsKey tests if a key is not contained in a map.
//
// Example:
//
// func TestNotContainsKey(t *testing.T) {
// config := map[string]int{"timeout": 30, "retries": 3}
// assert.NotContainsKey[int]("maxSize")(config)(t) // Passes
// assert.NotContainsKey[int]("timeout")(config)(t) // Fails
// }
func NotContainsKey[T any, K comparable](expected K) Kleisli[map[K]T] {
return func(actual map[K]T) Reader {
return func(t *testing.T) bool {
return assert.NotContains(t, actual, expected)
}
}
}
// That asserts that a particular predicate matches.
//
// This is a powerful function that allows you to create custom assertions using predicates.
//
// Example:
//
// func TestThat(t *testing.T) {
// // Test if a number is positive
// isPositive := func(n int) bool { return n > 0 }
// assert.That(isPositive)(42)(t) // Passes
// assert.That(isPositive)(-5)(t) // Fails
//
// // Test if a string is uppercase
// isUppercase := func(s string) bool { return s == strings.ToUpper(s) }
// assert.That(isUppercase)("HELLO")(t) // Passes
// assert.That(isUppercase)("Hello")(t) // Fails
//
// // Can be combined with Local for property testing
// type User struct { Age int }
// ageIsAdult := assert.Local(func(u User) int { return u.Age })(
// assert.That(func(age int) bool { return age >= 18 }),
// )
// user := User{Age: 25}
// ageIsAdult(user)(t) // Passes
// }
func That[T any](pred Predicate[T]) Kleisli[T] {
return func(a T) Reader {
return func(t *testing.T) bool {
if pred(a) {
return true
}
return assert.Fail(t, fmt.Sprintf("Predicate %v does not match value %v", pred, a))
}
}
}
// AllOf combines multiple assertion Readers into a single Reader that passes
// only if all assertions pass.
//
// This function uses boolean AND logic (MonoidAll) to combine the results of
// all assertions. If any assertion fails, the combined assertion fails.
//
// This is useful for grouping related assertions together and ensuring all
// conditions are met.
//
// Parameters:
// - readers: Array of assertion Readers to combine
//
// Returns:
// - A single Reader that performs all assertions and returns true only if all pass
//
// Example:
//
// func TestUser(t *testing.T) {
// user := User{Name: "Alice", Age: 30, Active: true}
// assertions := assert.AllOf([]assert.Reader{
// assert.Equal("Alice")(user.Name),
// assert.Equal(30)(user.Age),
// assert.Equal(true)(user.Active),
// })
// assertions(t)
// }
//
//go:inline
func AllOf(readers []Reader) Reader {
return reader.MonadReduceArrayM(readers, boolean.MonoidAll)
}
// RunAll executes a map of named test cases, running each as a subtest.
//
// This function creates a Reader that runs multiple named test cases using
// Go's t.Run for proper test isolation and reporting. Each test case is
// executed as a separate subtest with its own name.
//
// The function returns true only if all subtests pass. This allows for
// better test organization and clearer test output.
//
// Parameters:
// - testcases: Map of test names to assertion Readers
//
// Returns:
// - A Reader that executes all named test cases and returns true if all pass
//
// Example:
//
// func TestMathOperations(t *testing.T) {
// testcases := map[string]assert.Reader{
// "addition": assert.Equal(4)(2 + 2),
// "multiplication": assert.Equal(6)(2 * 3),
// "subtraction": assert.Equal(1)(3 - 2),
// }
// assert.RunAll(testcases)(t)
// }
//
//go:inline
func RunAll(testcases map[string]Reader) Reader {
return func(t *testing.T) bool {
current := true
for k, r := range testcases {
// run the subtest first so that an earlier failure does not short-circuit the remaining cases
ok := t.Run(k, func(t1 *testing.T) {
r(t1)
})
current = current && ok
}
return current
}
}
// Local transforms a Reader that works on type R1 into a Reader that works on type R2,
// by providing a function that converts R2 to R1. This allows you to focus a test on a
// specific property or subset of a larger data structure.
//
// This is particularly useful when you have an assertion that operates on a specific field
// or property, and you want to apply it to a complete object. Instead of extracting the
// property and then asserting on it, you can transform the assertion to work directly
// on the whole object.
//
// Parameters:
// - f: A function that extracts or transforms R2 into R1
//
// Returns:
// - A function that transforms a Reader[R1, Reader] into a Reader[R2, Reader]
//
// Example:
//
// type User struct {
// Name string
// Age int
// }
//
// // Create an assertion that checks if age is positive
// ageIsPositive := assert.That(func(age int) bool { return age > 0 })
//
// // Focus this assertion on the Age field of User
// userAgeIsPositive := assert.Local(func(u User) int { return u.Age })(ageIsPositive)
//
// // Now we can test the whole User object
// user := User{Name: "Alice", Age: 30}
// userAgeIsPositive(user)(t)
//
//go:inline
func Local[R1, R2 any](f func(R2) R1) func(Kleisli[R1]) Kleisli[R2] {
return reader.Local[Reader](f)
}
// LocalL is similar to Local but uses a Lens to focus on a specific property.
// A Lens is a functional programming construct that provides a composable way to
// focus on a part of a data structure.
//
// This function is particularly useful when you want to focus a test on a specific
// field of a struct using a lens, making the code more declarative and composable.
// Lenses are often code-generated or predefined for common data structures.
//
// Parameters:
// - l: A Lens that focuses from type S to type T
//
// Returns:
// - A function that transforms a Reader[T, Reader] into a Reader[S, Reader]
//
// Example:
//
// type Person struct {
// Name string
// Email string
// }
//
// // Assume we have a lens that focuses on the Email field
// var emailLens = lens.Prop[Person, string]("Email")
//
// // Create an assertion for email format
// validEmail := assert.That(func(email string) bool {
// return strings.Contains(email, "@")
// })
//
// // Focus this assertion on the Email property using a lens
// validPersonEmail := assert.LocalL(emailLens)(validEmail)
//
// // Test a Person object
// person := Person{Name: "Bob", Email: "bob@example.com"}
// validPersonEmail(person)(t)
//
//go:inline
func LocalL[S, T any](l Lens[S, T]) func(Kleisli[T]) Kleisli[S] {
return reader.Local[Reader](l.Get)
}
// fromOptionalGetter is an internal helper that creates an assertion Reader from
// an optional getter function. It asserts that the optional value is present (Some).
func fromOptionalGetter[S, T any](getter func(S) option.Option[T], msgAndArgs ...any) Kleisli[S] {
return func(s S) Reader {
return func(t *testing.T) bool {
return assert.True(t, option.IsSome(getter(s)), msgAndArgs...)
}
}
}
// FromOptional creates an assertion that checks if an Optional can successfully extract a value.
// An Optional is an optic that represents an optional reference to a subpart of a data structure.
//
// This function is useful when you have an Optional optic and want to assert that the optional
// value is present (Some) rather than absent (None). The assertion passes if the Optional's
// GetOption returns Some, and fails if it returns None.
//
// This enables property-focused testing where you verify that a particular optional field or
// sub-structure exists and is accessible.
//
// Parameters:
// - opt: An Optional optic that focuses from type S to type T
//
// Returns:
// - A Reader that asserts the optional value is present when applied to a value of type S
//
// Example:
//
// type Config struct {
// Database *DatabaseConfig // Optional field
// }
//
// type DatabaseConfig struct {
// Host string
// Port int
// }
//
// // Create an Optional that focuses on the Database field
// dbOptional := optional.MakeOptional(
// func(c Config) option.Option[*DatabaseConfig] {
// if c.Database != nil {
// return option.Some(c.Database)
// }
// return option.None[*DatabaseConfig]()
// },
// func(c Config, db *DatabaseConfig) Config {
// c.Database = db
// return c
// },
// )
//
// // Assert that the database config is present
// hasDatabaseConfig := assert.FromOptional(dbOptional)
//
// config := Config{Database: &DatabaseConfig{Host: "localhost", Port: 5432}}
// hasDatabaseConfig(config)(t) // Passes
//
// emptyConfig := Config{Database: nil}
// hasDatabaseConfig(emptyConfig)(t) // Fails
//
//go:inline
func FromOptional[S, T any](opt Optional[S, T]) reader.Reader[S, Reader] {
return fromOptionalGetter(opt.GetOption, "Optional: %s", opt)
}
// FromPrism creates an assertion that checks if a Prism can successfully extract a value.
// A Prism is an optic used to select part of a sum type (tagged union or variant).
//
// This function is useful when you have a Prism optic and want to assert that a value
// matches a specific variant of a sum type. The assertion passes if the Prism's GetOption
// returns Some (meaning the value is of the expected variant), and fails if it returns None
// (meaning the value is a different variant).
//
// This enables variant-focused testing where you verify that a value is of a particular
// type or matches a specific condition within a sum type.
//
// Parameters:
// - p: A Prism optic that focuses from type S to type T
//
// Returns:
// - A Reader that asserts the prism successfully extracts when applied to a value of type S
//
// Example:
//
// type Result interface{ isResult() }
// type Success struct{ Value int }
// type Failure struct{ Error string }
//
// func (Success) isResult() {}
// func (Failure) isResult() {}
//
// // Create a Prism that focuses on Success variant
// successPrism := prism.MakePrism(
// func(r Result) option.Option[int] {
// if s, ok := r.(Success); ok {
// return option.Some(s.Value)
// }
// return option.None[int]()
// },
// func(v int) Result { return Success{Value: v} },
// )
//
// // Assert that the result is a Success
// isSuccess := assert.FromPrism(successPrism)
//
// result1 := Success{Value: 42}
// isSuccess(result1)(t) // Passes
//
// result2 := Failure{Error: "something went wrong"}
// isSuccess(result2)(t) // Fails
//
//go:inline
func FromPrism[S, T any](p Prism[S, T]) reader.Reader[S, Reader] {
return fromOptionalGetter(p.GetOption, "Prism: %s", p)
}


@@ -16,94 +16,677 @@
-package assert
-import (
-"fmt"
-"testing"
-"github.com/IBM/fp-go/v2/eq"
-"github.com/IBM/fp-go/v2/result"
-"github.com/stretchr/testify/assert"
-)
-var (
-errTest = fmt.Errorf("test failure")
-// Eq is the equal predicate checking if objects are equal
-Eq = eq.FromEquals(assert.ObjectsAreEqual)
-)
-func wrap1[T any](wrapped func(t assert.TestingT, expected, actual any, msgAndArgs ...any) bool, t *testing.T, expected T) result.Kleisli[T, T] {
-return func(actual T) Result[T] {
-ok := wrapped(t, expected, actual)
-if ok {
-return result.Of(actual)
-}
-return result.Left[T](errTest)
-}
-}
-// NotEqual tests if the expected and the actual values are not equal
-func NotEqual[T any](t *testing.T, expected T) result.Kleisli[T, T] {
-return wrap1(assert.NotEqual, t, expected)
-}
-// Equal tests if the expected and the actual values are equal
-func Equal[T any](t *testing.T, expected T) result.Kleisli[T, T] {
-return wrap1(assert.Equal, t, expected)
-}
-// Length tests if an array has the expected length
-func Length[T any](t *testing.T, expected int) result.Kleisli[[]T, []T] {
-return func(actual []T) Result[[]T] {
-ok := assert.Len(t, actual, expected)
-if ok {
-return result.Of(actual)
-}
-return result.Left[[]T](errTest)
-}
-}
-// NoError validates that there is no error
-func NoError[T any](t *testing.T) result.Operator[T, T] {
-return func(actual Result[T]) Result[T] {
-return result.MonadFold(actual, func(e error) Result[T] {
-assert.NoError(t, e)
-return result.Left[T](e)
-}, func(value T) Result[T] {
-assert.NoError(t, nil)
-return result.Of(value)
-})
-}
-}
-// ArrayContains tests if a value is contained in an array
-func ArrayContains[T any](t *testing.T, expected T) result.Kleisli[[]T, []T] {
-return func(actual []T) Result[[]T] {
-ok := assert.Contains(t, actual, expected)
-if ok {
-return result.Of(actual)
-}
-return result.Left[[]T](errTest)
-}
-}
-// ContainsKey tests if a key is contained in a map
-func ContainsKey[T any, K comparable](t *testing.T, expected K) result.Kleisli[map[K]T, map[K]T] {
-return func(actual map[K]T) Result[map[K]T] {
-ok := assert.Contains(t, actual, expected)
-if ok {
-return result.Of(actual)
-}
-return result.Left[map[K]T](errTest)
-}
-}
-// NotContainsKey tests if a key is not contained in a map
-func NotContainsKey[T any, K comparable](t *testing.T, expected K) result.Kleisli[map[K]T, map[K]T] {
-return func(actual map[K]T) Result[map[K]T] {
-ok := assert.NotContains(t, actual, expected)
-if ok {
-return result.Of(actual)
-}
-return result.Left[map[K]T](errTest)
-}
-}
package assert
import (
"errors"
"testing"
"github.com/IBM/fp-go/v2/optics/prism"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/result"
S "github.com/IBM/fp-go/v2/string"
)
func TestEqual(t *testing.T) {
t.Run("should pass when values are equal", func(t *testing.T) {
result := Equal(42)(42)(t)
if !result {
t.Error("Expected Equal to pass for equal values")
}
})
t.Run("should fail when values are not equal", func(t *testing.T) {
mockT := &testing.T{}
result := Equal(42)(43)(mockT)
if result {
t.Error("Expected Equal to fail for different values")
}
})
t.Run("should work with strings", func(t *testing.T) {
result := Equal("hello")("hello")(t)
if !result {
t.Error("Expected Equal to pass for equal strings")
}
})
}
func TestNotEqual(t *testing.T) {
t.Run("should pass when values are not equal", func(t *testing.T) {
result := NotEqual(42)(43)(t)
if !result {
t.Error("Expected NotEqual to pass for different values")
}
})
t.Run("should fail when values are equal", func(t *testing.T) {
mockT := &testing.T{}
result := NotEqual(42)(42)(mockT)
if result {
t.Error("Expected NotEqual to fail for equal values")
}
})
}
func TestArrayNotEmpty(t *testing.T) {
t.Run("should pass for non-empty array", func(t *testing.T) {
arr := []int{1, 2, 3}
result := ArrayNotEmpty(arr)(t)
if !result {
t.Error("Expected ArrayNotEmpty to pass for non-empty array")
}
})
t.Run("should fail for empty array", func(t *testing.T) {
mockT := &testing.T{}
arr := []int{}
result := ArrayNotEmpty(arr)(mockT)
if result {
t.Error("Expected ArrayNotEmpty to fail for empty array")
}
})
}
func TestRecordNotEmpty(t *testing.T) {
t.Run("should pass for non-empty map", func(t *testing.T) {
mp := map[string]int{"a": 1, "b": 2}
result := RecordNotEmpty(mp)(t)
if !result {
t.Error("Expected RecordNotEmpty to pass for non-empty map")
}
})
t.Run("should fail for empty map", func(t *testing.T) {
mockT := &testing.T{}
mp := map[string]int{}
result := RecordNotEmpty(mp)(mockT)
if result {
t.Error("Expected RecordNotEmpty to fail for empty map")
}
})
}
func TestArrayLength(t *testing.T) {
t.Run("should pass when length matches", func(t *testing.T) {
arr := []int{1, 2, 3}
result := ArrayLength[int](3)(arr)(t)
if !result {
t.Error("Expected ArrayLength to pass when length matches")
}
})
t.Run("should fail when length doesn't match", func(t *testing.T) {
mockT := &testing.T{}
arr := []int{1, 2, 3}
result := ArrayLength[int](5)(arr)(mockT)
if result {
t.Error("Expected ArrayLength to fail when length doesn't match")
}
})
t.Run("should work with empty array", func(t *testing.T) {
arr := []string{}
result := ArrayLength[string](0)(arr)(t)
if !result {
t.Error("Expected ArrayLength to pass for empty array with expected length 0")
}
})
}
func TestRecordLength(t *testing.T) {
t.Run("should pass when map length matches", func(t *testing.T) {
mp := map[string]int{"a": 1, "b": 2}
result := RecordLength[string, int](2)(mp)(t)
if !result {
t.Error("Expected RecordLength to pass when length matches")
}
})
t.Run("should fail when map length doesn't match", func(t *testing.T) {
mockT := &testing.T{}
mp := map[string]int{"a": 1}
result := RecordLength[string, int](3)(mp)(mockT)
if result {
t.Error("Expected RecordLength to fail when length doesn't match")
}
})
}
func TestStringLength(t *testing.T) {
t.Run("should pass when string length matches", func(t *testing.T) {
str := "hello"
result := StringLength[string, int](5)(str)(t)
if !result {
t.Error("Expected StringLength to pass when length matches")
}
})
t.Run("should fail when string length doesn't match", func(t *testing.T) {
mockT := &testing.T{}
str := "hello"
result := StringLength[string, int](10)(str)(mockT)
if result {
t.Error("Expected StringLength to fail when length doesn't match")
}
})
t.Run("should work with empty string", func(t *testing.T) {
str := ""
result := StringLength[string, int](0)(str)(t)
if !result {
t.Error("Expected StringLength to pass for empty string with expected length 0")
}
})
}
func TestNoError(t *testing.T) {
t.Run("should pass when error is nil", func(t *testing.T) {
result := NoError(nil)(t)
if !result {
t.Error("Expected NoError to pass when error is nil")
}
})
t.Run("should fail when error is not nil", func(t *testing.T) {
mockT := &testing.T{}
err := errors.New("test error")
result := NoError(err)(mockT)
if result {
t.Error("Expected NoError to fail when error is not nil")
}
})
}
func TestError(t *testing.T) {
t.Run("should pass when error is not nil", func(t *testing.T) {
err := errors.New("test error")
result := Error(err)(t)
if !result {
t.Error("Expected Error to pass when error is not nil")
}
})
t.Run("should fail when error is nil", func(t *testing.T) {
mockT := &testing.T{}
result := Error(nil)(mockT)
if result {
t.Error("Expected Error to fail when error is nil")
}
})
}
func TestSuccess(t *testing.T) {
t.Run("should pass for successful result", func(t *testing.T) {
res := result.Of(42)
result := Success(res)(t)
if !result {
t.Error("Expected Success to pass for successful result")
}
})
t.Run("should fail for error result", func(t *testing.T) {
mockT := &testing.T{}
res := result.Left[int](errors.New("test error"))
result := Success(res)(mockT)
if result {
t.Error("Expected Success to fail for error result")
}
})
}
func TestFailure(t *testing.T) {
t.Run("should pass for error result", func(t *testing.T) {
res := result.Left[int](errors.New("test error"))
result := Failure(res)(t)
if !result {
t.Error("Expected Failure to pass for error result")
}
})
t.Run("should fail for successful result", func(t *testing.T) {
mockT := &testing.T{}
res := result.Of(42)
result := Failure(res)(mockT)
if result {
t.Error("Expected Failure to fail for successful result")
}
})
}
func TestArrayContains(t *testing.T) {
t.Run("should pass when element is in array", func(t *testing.T) {
arr := []int{1, 2, 3, 4, 5}
result := ArrayContains(3)(arr)(t)
if !result {
t.Error("Expected ArrayContains to pass when element is in array")
}
})
t.Run("should fail when element is not in array", func(t *testing.T) {
mockT := &testing.T{}
arr := []int{1, 2, 3}
result := ArrayContains(10)(arr)(mockT)
if result {
t.Error("Expected ArrayContains to fail when element is not in array")
}
})
t.Run("should work with strings", func(t *testing.T) {
arr := []string{"apple", "banana", "cherry"}
result := ArrayContains("banana")(arr)(t)
if !result {
t.Error("Expected ArrayContains to pass for string element")
}
})
}
func TestContainsKey(t *testing.T) {
t.Run("should pass when key exists in map", func(t *testing.T) {
mp := map[string]int{"a": 1, "b": 2, "c": 3}
result := ContainsKey[int]("b")(mp)(t)
if !result {
t.Error("Expected ContainsKey to pass when key exists")
}
})
t.Run("should fail when key doesn't exist in map", func(t *testing.T) {
mockT := &testing.T{}
mp := map[string]int{"a": 1, "b": 2}
result := ContainsKey[int]("z")(mp)(mockT)
if result {
t.Error("Expected ContainsKey to fail when key doesn't exist")
}
})
}
func TestNotContainsKey(t *testing.T) {
t.Run("should pass when key doesn't exist in map", func(t *testing.T) {
mp := map[string]int{"a": 1, "b": 2}
result := NotContainsKey[int]("z")(mp)(t)
if !result {
t.Error("Expected NotContainsKey to pass when key doesn't exist")
}
})
t.Run("should fail when key exists in map", func(t *testing.T) {
mockT := &testing.T{}
mp := map[string]int{"a": 1, "b": 2}
result := NotContainsKey[int]("a")(mp)(mockT)
if result {
t.Error("Expected NotContainsKey to fail when key exists")
}
})
}
func TestThat(t *testing.T) {
t.Run("should pass when predicate is true", func(t *testing.T) {
isEven := func(n int) bool { return n%2 == 0 }
result := That(isEven)(42)(t)
if !result {
t.Error("Expected That to pass when predicate is true")
}
})
t.Run("should fail when predicate is false", func(t *testing.T) {
mockT := &testing.T{}
isEven := func(n int) bool { return n%2 == 0 }
result := That(isEven)(43)(mockT)
if result {
t.Error("Expected That to fail when predicate is false")
}
})
t.Run("should work with string predicates", func(t *testing.T) {
startsWithH := func(s string) bool { return S.IsNonEmpty(s) && s[0] == 'h' }
result := That(startsWithH)("hello")(t)
if !result {
t.Error("Expected That to pass for string predicate")
}
})
}
func TestAllOf(t *testing.T) {
t.Run("should pass when all assertions pass", func(t *testing.T) {
assertions := AllOf([]Reader{
Equal(42)(42),
Equal("hello")("hello"),
ArrayNotEmpty([]int{1, 2, 3}),
})
result := assertions(t)
if !result {
t.Error("Expected AllOf to pass when all assertions pass")
}
})
t.Run("should fail when any assertion fails", func(t *testing.T) {
mockT := &testing.T{}
assertions := AllOf([]Reader{
Equal(42)(42),
Equal("hello")("goodbye"),
ArrayNotEmpty([]int{1, 2, 3}),
})
result := assertions(mockT)
if result {
t.Error("Expected AllOf to fail when any assertion fails")
}
})
t.Run("should work with empty array", func(t *testing.T) {
assertions := AllOf([]Reader{})
result := assertions(t)
if !result {
t.Error("Expected AllOf to pass for empty array")
}
})
t.Run("should combine multiple array assertions", func(t *testing.T) {
arr := []int{1, 2, 3, 4, 5}
assertions := AllOf([]Reader{
ArrayNotEmpty(arr),
ArrayLength[int](5)(arr),
ArrayContains(3)(arr),
})
result := assertions(t)
if !result {
t.Error("Expected AllOf to pass for multiple array assertions")
}
})
}
func TestRunAll(t *testing.T) {
t.Run("should run all named test cases", func(t *testing.T) {
testcases := map[string]Reader{
"equality": Equal(42)(42),
"string_check": Equal("test")("test"),
"array_check": ArrayNotEmpty([]int{1, 2, 3}),
}
result := RunAll(testcases)(t)
if !result {
t.Error("Expected RunAll to pass when all test cases pass")
}
})
// Note: Testing failure behavior of RunAll is tricky because subtests
// will actually fail in the test framework. The function works correctly
// as demonstrated by the passing test above.
t.Run("should work with empty test cases", func(t *testing.T) {
testcases := map[string]Reader{}
result := RunAll(testcases)(t)
if !result {
t.Error("Expected RunAll to pass for empty test cases")
}
})
}
func TestEq(t *testing.T) {
t.Run("should return true for equal values", func(t *testing.T) {
if !Eq.Equals(42, 42) {
t.Error("Expected Eq to return true for equal integers")
}
})
t.Run("should return false for different values", func(t *testing.T) {
if Eq.Equals(42, 43) {
t.Error("Expected Eq to return false for different integers")
}
})
t.Run("should work with strings", func(t *testing.T) {
if !Eq.Equals("hello", "hello") {
t.Error("Expected Eq to return true for equal strings")
}
if Eq.Equals("hello", "world") {
t.Error("Expected Eq to return false for different strings")
}
})
t.Run("should work with slices", func(t *testing.T) {
arr1 := []int{1, 2, 3}
arr2 := []int{1, 2, 3}
if !Eq.Equals(arr1, arr2) {
t.Error("Expected Eq to return true for equal slices")
}
})
}
func TestLocal(t *testing.T) {
type User struct {
Name string
Age int
}
t.Run("should focus assertion on a property", func(t *testing.T) {
// Create an assertion that checks if age is positive
ageIsPositive := That(func(age int) bool { return age > 0 })
// Focus this assertion on the Age field of User
userAgeIsPositive := Local(func(u User) int { return u.Age })(ageIsPositive)
// Test with a user who has a positive age
user := User{Name: "Alice", Age: 30}
result := userAgeIsPositive(user)(t)
if !result {
t.Error("Expected focused assertion to pass for positive age")
}
})
t.Run("should fail when focused property doesn't match", func(t *testing.T) {
mockT := &testing.T{}
ageIsPositive := That(func(age int) bool { return age > 0 })
userAgeIsPositive := Local(func(u User) int { return u.Age })(ageIsPositive)
// Test with a user who has zero age
user := User{Name: "Bob", Age: 0}
result := userAgeIsPositive(user)(mockT)
if result {
t.Error("Expected focused assertion to fail for zero age")
}
})
t.Run("should compose with other assertions", func(t *testing.T) {
// Create multiple focused assertions
nameNotEmpty := Local(func(u User) string { return u.Name })(
That(S.IsNonEmpty),
)
ageInRange := Local(func(u User) int { return u.Age })(
That(func(age int) bool { return age >= 18 && age <= 100 }),
)
user := User{Name: "Charlie", Age: 25}
assertions := AllOf([]Reader{
nameNotEmpty(user),
ageInRange(user),
})
result := assertions(t)
if !result {
t.Error("Expected composed focused assertions to pass")
}
})
t.Run("should work with Equal assertion", func(t *testing.T) {
// Focus Equal assertion on Name field
nameIsAlice := Local(func(u User) string { return u.Name })(Equal("Alice"))
user := User{Name: "Alice", Age: 30}
result := nameIsAlice(user)(t)
if !result {
t.Error("Expected focused Equal assertion to pass")
}
})
}
func TestLocalL(t *testing.T) {
// Note: LocalL requires lens package which provides lens operations.
// This test demonstrates the concept, but actual usage would require
// proper lens definitions.
t.Run("conceptual test for LocalL", func(t *testing.T) {
// LocalL is similar to Local but uses lenses for focusing.
// It would be used like:
// validEmail := That(func(email string) bool { return strings.Contains(email, "@") })
// validPersonEmail := LocalL(emailLens)(validEmail)
//
// The actual implementation would require lens definitions from the lens package.
// This test serves as documentation for the intended usage.
})
}
func TestFromOptional(t *testing.T) {
type DatabaseConfig struct {
Host string
Port int
}
type Config struct {
Database *DatabaseConfig
}
// Create an Optional that focuses on the Database field
dbOptional := Optional[Config, *DatabaseConfig]{
GetOption: func(c Config) option.Option[*DatabaseConfig] {
if c.Database != nil {
return option.Of(c.Database)
}
return option.None[*DatabaseConfig]()
},
Set: func(db *DatabaseConfig) func(Config) Config {
return func(c Config) Config {
c.Database = db
return c
}
},
}
t.Run("should pass when optional value is present", func(t *testing.T) {
config := Config{Database: &DatabaseConfig{Host: "localhost", Port: 5432}}
hasDatabaseConfig := FromOptional(dbOptional)
result := hasDatabaseConfig(config)(t)
if !result {
t.Error("Expected FromOptional to pass when optional value is present")
}
})
t.Run("should fail when optional value is absent", func(t *testing.T) {
mockT := &testing.T{}
emptyConfig := Config{Database: nil}
hasDatabaseConfig := FromOptional(dbOptional)
result := hasDatabaseConfig(emptyConfig)(mockT)
if result {
t.Error("Expected FromOptional to fail when optional value is absent")
}
})
t.Run("should work with nested optionals", func(t *testing.T) {
type AdvancedSettings struct {
Cache bool
}
type Settings struct {
Advanced *AdvancedSettings
}
advancedOptional := Optional[Settings, *AdvancedSettings]{
GetOption: func(s Settings) option.Option[*AdvancedSettings] {
if s.Advanced != nil {
return option.Of(s.Advanced)
}
return option.None[*AdvancedSettings]()
},
Set: func(adv *AdvancedSettings) func(Settings) Settings {
return func(s Settings) Settings {
s.Advanced = adv
return s
}
},
}
settings := Settings{Advanced: &AdvancedSettings{Cache: true}}
hasAdvanced := FromOptional(advancedOptional)
result := hasAdvanced(settings)(t)
if !result {
t.Error("Expected FromOptional to pass for nested optional")
}
})
}
// Helper types for Prism testing
type PrismTestResult interface {
isPrismTestResult()
}
type PrismTestSuccess struct {
Value int
}
type PrismTestFailure struct {
Error string
}
func (PrismTestSuccess) isPrismTestResult() {}
func (PrismTestFailure) isPrismTestResult() {}
func TestFromPrism(t *testing.T) {
// Create a Prism that focuses on Success variant using prism.MakePrism
successPrism := prism.MakePrism(
func(r PrismTestResult) option.Option[int] {
if s, ok := r.(PrismTestSuccess); ok {
return option.Of(s.Value)
}
return option.None[int]()
},
func(v int) PrismTestResult {
return PrismTestSuccess{Value: v}
},
)
// Create a Prism that focuses on Failure variant
failurePrism := prism.MakePrism(
func(r PrismTestResult) option.Option[string] {
if f, ok := r.(PrismTestFailure); ok {
return option.Of(f.Error)
}
return option.None[string]()
},
func(err string) PrismTestResult {
return PrismTestFailure{Error: err}
},
)
t.Run("should pass when prism successfully extracts", func(t *testing.T) {
result := PrismTestSuccess{Value: 42}
isSuccess := FromPrism(successPrism)
testResult := isSuccess(result)(t)
if !testResult {
t.Error("Expected FromPrism to pass when prism extracts successfully")
}
})
t.Run("should fail when prism cannot extract", func(t *testing.T) {
mockT := &testing.T{}
result := PrismTestFailure{Error: "something went wrong"}
isSuccess := FromPrism(successPrism)
testResult := isSuccess(result)(mockT)
if testResult {
t.Error("Expected FromPrism to fail when prism cannot extract")
}
})
t.Run("should work with failure prism", func(t *testing.T) {
result := PrismTestFailure{Error: "test error"}
isFailure := FromPrism(failurePrism)
testResult := isFailure(result)(t)
if !testResult {
t.Error("Expected FromPrism to pass for failure prism on failure result")
}
})
t.Run("should fail with failure prism on success result", func(t *testing.T) {
mockT := &testing.T{}
result := PrismTestSuccess{Value: 100}
isFailure := FromPrism(failurePrism)
testResult := isFailure(result)(mockT)
if testResult {
t.Error("Expected FromPrism to fail for failure prism on success result")
}
})
}
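The hunk above swaps the old Result-returning helpers, which took *testing.T up front, for the new curried Reader-based helpers, which take *testing.T last. A minimal migration sketch, assuming the package is consumed from the outside as in the examples that follow (the test name is hypothetical):

package assert_test
import (
"testing"
"github.com/IBM/fp-go/v2/assert"
)
// TestMigratedStyle illustrates the new call shape: the expected value is
// applied first, the actual value second, and *testing.T last.
func TestMigratedStyle(t *testing.T) {
value := 21 * 2
if !assert.Equal(42)(value)(t) {
t.Error("expected the assertion to pass")
}
}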

v2/assert/example_test.go (new file, 235 lines)

@@ -0,0 +1,235 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assert_test
import (
"errors"
"strings"
"testing"
"github.com/IBM/fp-go/v2/assert"
"github.com/IBM/fp-go/v2/result"
)
// Example_basicAssertions demonstrates basic equality and inequality assertions
func Example_basicAssertions() {
// This would be in a real test function
var t *testing.T // placeholder for example
// Basic equality
value := 42
assert.Equal(42)(value)(t)
// String equality
name := "Alice"
assert.Equal("Alice")(name)(t)
// Inequality
assert.NotEqual(10)(value)(t)
}
// Example_arrayAssertions demonstrates array-related assertions
func Example_arrayAssertions() {
var t *testing.T // placeholder for example
numbers := []int{1, 2, 3, 4, 5}
// Check array is not empty
assert.ArrayNotEmpty(numbers)(t)
// Check array length
assert.ArrayLength[int](5)(numbers)(t)
// Check array contains a value
assert.ArrayContains(3)(numbers)(t)
}
// Example_mapAssertions demonstrates map-related assertions
func Example_mapAssertions() {
var t *testing.T // placeholder for example
config := map[string]int{
"timeout": 30,
"retries": 3,
"maxSize": 1000,
}
// Check map is not empty
assert.RecordNotEmpty(config)(t)
// Check map length
assert.RecordLength[string, int](3)(config)(t)
// Check map contains key
assert.ContainsKey[int]("timeout")(config)(t)
// Check map does not contain key
assert.NotContainsKey[int]("unknown")(config)(t)
}
// Example_errorAssertions demonstrates error-related assertions
func Example_errorAssertions() {
var t *testing.T // placeholder for example
// Assert no error
err := doSomethingSuccessful()
assert.NoError(err)(t)
// Assert error exists
err2 := doSomethingThatFails()
assert.Error(err2)(t)
}
// Example_resultAssertions demonstrates Result type assertions
func Example_resultAssertions() {
var t *testing.T // placeholder for example
// Assert success
successResult := result.Of[int](42)
assert.Success(successResult)(t)
// Assert failure
failureResult := result.Left[int](errors.New("something went wrong"))
assert.Failure(failureResult)(t)
}
// Example_predicateAssertions demonstrates custom predicate assertions
func Example_predicateAssertions() {
var t *testing.T // placeholder for example
// Test if a number is positive
isPositive := func(n int) bool { return n > 0 }
assert.That(isPositive)(42)(t)
// Test if a string is uppercase
isUppercase := func(s string) bool { return s == strings.ToUpper(s) }
assert.That(isUppercase)("HELLO")(t)
// Test if a number is even
isEven := func(n int) bool { return n%2 == 0 }
assert.That(isEven)(10)(t)
}
// Example_allOf demonstrates combining multiple assertions
func Example_allOf() {
var t *testing.T // placeholder for example
type User struct {
Name string
Age int
Active bool
}
user := User{Name: "Alice", Age: 30, Active: true}
// Combine multiple assertions
assertions := assert.AllOf([]assert.Reader{
assert.Equal("Alice")(user.Name),
assert.Equal(30)(user.Age),
assert.Equal(true)(user.Active),
})
assertions(t)
}
// Example_runAll demonstrates running named test cases
func Example_runAll() {
var t *testing.T // placeholder for example
testcases := map[string]assert.Reader{
"addition": assert.Equal(4)(2 + 2),
"multiplication": assert.Equal(6)(2 * 3),
"subtraction": assert.Equal(1)(3 - 2),
"division": assert.Equal(2)(10 / 5),
}
assert.RunAll(testcases)(t)
}
// Example_local demonstrates focusing assertions on specific properties
func Example_local() {
var t *testing.T // placeholder for example
type User struct {
Name string
Age int
}
// Create an assertion that checks if age is positive
ageIsPositive := assert.That(func(age int) bool { return age > 0 })
// Focus this assertion on the Age field of User
userAgeIsPositive := assert.Local(func(u User) int { return u.Age })(ageIsPositive)
// Now we can test the whole User object
user := User{Name: "Alice", Age: 30}
userAgeIsPositive(user)(t)
}
// Example_composableAssertions demonstrates building complex assertions
func Example_composableAssertions() {
var t *testing.T // placeholder for example
type Config struct {
Host string
Port int
Timeout int
Retries int
}
config := Config{
Host: "localhost",
Port: 8080,
Timeout: 30,
Retries: 3,
}
// Create focused assertions for each field
validHost := assert.Local(func(c Config) string { return c.Host })(
assert.StringNotEmpty,
)
validPort := assert.Local(func(c Config) int { return c.Port })(
assert.That(func(p int) bool { return p > 0 && p < 65536 }),
)
validTimeout := assert.Local(func(c Config) int { return c.Timeout })(
assert.That(func(t int) bool { return t > 0 }),
)
validRetries := assert.Local(func(c Config) int { return c.Retries })(
assert.That(func(r int) bool { return r >= 0 }),
)
// Combine all assertions
validConfig := assert.AllOf([]assert.Reader{
validHost(config),
validPort(config),
validTimeout(config),
validRetries(config),
})
validConfig(t)
}
// Helper functions for examples
func doSomethingSuccessful() error {
return nil
}
func doSomethingThatFails() error {
return errors.New("operation failed")
}


@@ -1,7 +1,22 @@
package assert
-import "github.com/IBM/fp-go/v2/result"
import (
"testing"
"github.com/IBM/fp-go/v2/optics/lens"
"github.com/IBM/fp-go/v2/optics/optional"
"github.com/IBM/fp-go/v2/optics/prism"
"github.com/IBM/fp-go/v2/predicate"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/result"
)
type (
Result[T any] = result.Result[T]
Reader = reader.Reader[*testing.T, bool]
Kleisli[T any] = reader.Reader[T, Reader]
Predicate[T any] = predicate.Predicate[T]
Lens[S, T any] = lens.Lens[S, T]
Optional[S, T any] = optional.Optional[S, T]
Prism[S, T any] = prism.Prism[S, T]
)
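With these aliases, Reader is a function from *testing.T to bool and Kleisli[T] maps a value of T to such a Reader, so custom assertions compose directly from predicates. A small sketch inside the package itself (the helper and test names are hypothetical):

// IsPositive is a hypothetical custom assertion built from the aliases above:
// Kleisli[int] is just func(int) Reader, and That lifts a predicate into it.
var IsPositive Kleisli[int] = That(func(n int) bool { return n > 0 })
func TestIsPositive(t *testing.T) {
IsPositive(42)(t)
}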


@@ -8,5 +8,5 @@ import (
// BuilderPrism creates a [Prism] that converts between a builder and its type
func BuilderPrism[T any, B Builder[T]](creator func(T) B) Prism[B, T] {
-return prism.MakePrism(F.Flow2(B.Build, result.ToOption[T]), creator)
return prism.MakePrismWithName(F.Flow2(B.Build, result.ToOption[T]), creator, "BuilderPrism")
}


@@ -382,7 +382,7 @@ func BenchmarkToString(b *testing.B) {
data := []byte("Hello, World!") data := []byte("Hello, World!")
b.Run("small", func(b *testing.B) { b.Run("small", func(b *testing.B) {
for i := 0; i < b.N; i++ { for b.Loop() {
_ = ToString(data) _ = ToString(data)
} }
}) })
@@ -393,7 +393,7 @@ func BenchmarkToString(b *testing.B) {
large[i] = byte(i % 256)
}
b.ResetTimer()
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = ToString(large)
}
})
@@ -402,7 +402,7 @@ func BenchmarkToString(b *testing.B) {
func BenchmarkSize(b *testing.B) {
data := []byte("Hello, World!")
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = Size(data)
}
}
@@ -412,7 +412,7 @@ func BenchmarkMonoidConcat(b *testing.B) {
c := []byte(" World") c := []byte(" World")
b.Run("small slices", func(b *testing.B) { b.Run("small slices", func(b *testing.B) {
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Monoid.Concat(a, c) _ = Monoid.Concat(a, c)
} }
}) })
@@ -421,7 +421,7 @@ func BenchmarkMonoidConcat(b *testing.B) {
large1 := make([]byte, 10000)
large2 := make([]byte, 10000)
b.ResetTimer()
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = Monoid.Concat(large1, large2)
}
})
@@ -436,7 +436,7 @@ func BenchmarkConcatAll(b *testing.B) {
}
b.Run("few slices", func(b *testing.B) {
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = ConcatAll(slices...)
}
})
@@ -447,7 +447,7 @@ func BenchmarkConcatAll(b *testing.B) {
many[i] = []byte{byte(i)}
}
b.ResetTimer()
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = ConcatAll(many...)
}
})
@@ -458,13 +458,13 @@ func BenchmarkOrdCompare(b *testing.B) {
c := []byte("abd") c := []byte("abd")
b.Run("equal", func(b *testing.B) { b.Run("equal", func(b *testing.B) {
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Ord.Compare(a, a) _ = Ord.Compare(a, a)
} }
}) })
b.Run("different", func(b *testing.B) { b.Run("different", func(b *testing.B) {
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Ord.Compare(a, c) _ = Ord.Compare(a, c)
} }
}) })
@@ -474,7 +474,7 @@ func BenchmarkOrdCompare(b *testing.B) {
large2 := make([]byte, 10000)
large2[9999] = 1
b.ResetTimer()
-for i := 0; i < b.N; i++ {
for b.Loop() {
_ = Ord.Compare(large1, large2)
}
})
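The only change in these hunks is the loop header: the classic for i := 0; i < b.N; i++ pattern is replaced by testing.B.Loop, the iteration helper added to the standard testing package in Go 1.24 (assuming the module has been bumped to that toolchain). The resulting shape of such a benchmark, using the ToString helper benchmarked above (the benchmark name is hypothetical):

func BenchmarkToStringSketch(b *testing.B) {
data := []byte("Hello, World!")
// b.Loop reports whether the benchmark should keep iterating
for b.Loop() {
_ = ToString(data)
}
}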


@@ -27,12 +27,14 @@ import (
"strings" "strings"
"text/template" "text/template"
S "github.com/IBM/fp-go/v2/string"
C "github.com/urfave/cli/v2" C "github.com/urfave/cli/v2"
) )
const ( const (
keyLensDir = "dir" keyLensDir = "dir"
keyVerbose = "verbose" keyVerbose = "verbose"
keyIncludeTestFile = "include-test-files"
lensAnnotation = "fp-go:Lens" lensAnnotation = "fp-go:Lens"
) )
@@ -49,11 +51,20 @@ var (
Value: false,
Usage: "Enable verbose output",
}
flagIncludeTestFiles = &C.BoolFlag{
Name: keyIncludeTestFile,
Aliases: []string{"t"},
Value: false,
Usage: "Include test files (*_test.go) when scanning for annotated types",
}
)
// structInfo holds information about a struct that needs lens generation
type structInfo struct {
Name string
TypeParams string // e.g., "[T any]" or "[K comparable, V any]" - for type declarations
TypeParamNames string // e.g., "[T]" or "[K, V]" - for type usage in function signatures
Fields []fieldInfo
Imports map[string]string // package path -> alias
}
@@ -65,6 +76,7 @@ type fieldInfo struct {
BaseType string // TypeName without leading * for pointer types
IsOptional bool // true if field is a pointer or has json omitempty tag
IsComparable bool // true if the type is comparable (can use ==)
IsEmbedded bool // true if this field comes from an embedded struct
}
// templateData holds data for template rendering
@@ -75,74 +87,151 @@ type templateData struct {
const lensStructTemplate = ` const lensStructTemplate = `
// {{.Name}}Lenses provides lenses for accessing fields of {{.Name}} // {{.Name}}Lenses provides lenses for accessing fields of {{.Name}}
type {{.Name}}Lenses struct { type {{.Name}}Lenses{{.TypeParams}} struct {
// mandatory fields
{{- range .Fields}} {{- range .Fields}}
{{.Name}} {{if .IsOptional}}LO.LensO[{{$.Name}}, {{.TypeName}}]{{else}}L.Lens[{{$.Name}}, {{.TypeName}}]{{end}} {{.Name}} __lens.Lens[{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}}
// optional fields
{{- range .Fields}}
{{- if .IsComparable}}
{{.Name}}O __lens_option.LensO[{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}}
{{- end}} {{- end}}
} }
// {{.Name}}RefLenses provides lenses for accessing fields of {{.Name}} via a reference to {{.Name}} // {{.Name}}RefLenses provides lenses for accessing fields of {{.Name}} via a reference to {{.Name}}
type {{.Name}}RefLenses struct { type {{.Name}}RefLenses{{.TypeParams}} struct {
// mandatory fields
{{- range .Fields}} {{- range .Fields}}
{{.Name}} {{if .IsOptional}}LO.LensO[*{{$.Name}}, {{.TypeName}}]{{else}}L.Lens[*{{$.Name}}, {{.TypeName}}]{{end}} {{.Name}} __lens.Lens[*{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}}
// optional fields
{{- range .Fields}}
{{- if .IsComparable}}
{{.Name}}O __lens_option.LensO[*{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}}
{{- end}}
// prisms
{{- range .Fields}}
{{.Name}}P __prism.Prism[*{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}}
}
// {{.Name}}Prisms provides prisms for accessing fields of {{.Name}}
type {{.Name}}Prisms{{.TypeParams}} struct {
{{- range .Fields}}
{{.Name}} __prism.Prism[{{$.Name}}{{$.TypeParamNames}}, {{.TypeName}}]
{{- end}} {{- end}}
} }
` `
const lensConstructorTemplate = ` const lensConstructorTemplate = `
// Make{{.Name}}Lenses creates a new {{.Name}}Lenses with lenses for all fields // Make{{.Name}}Lenses creates a new {{.Name}}Lenses with lenses for all fields
func Make{{.Name}}Lenses() {{.Name}}Lenses { func Make{{.Name}}Lenses{{.TypeParams}}() {{.Name}}Lenses{{.TypeParamNames}} {
// mandatory lenses
{{- range .Fields}} {{- range .Fields}}
{{- if .IsOptional}} lens{{.Name}} := __lens.MakeLensWithName(
iso{{.Name}} := I.FromZero[{{.TypeName}}]() func(s {{$.Name}}{{$.TypeParamNames}}) {{.TypeName}} { return s.{{.Name}} },
func(s {{$.Name}}{{$.TypeParamNames}}, v {{.TypeName}}) {{$.Name}}{{$.TypeParamNames}} { s.{{.Name}} = v; return s },
"{{$.Name}}{{$.TypeParamNames}}.{{.Name}}",
)
{{- end}}
// optional lenses
{{- range .Fields}}
{{- if .IsComparable}}
lens{{.Name}}O := __lens_option.FromIso[{{$.Name}}{{$.TypeParamNames}}](__iso_option.FromZero[{{.TypeName}}]())(lens{{.Name}})
{{- end}} {{- end}}
{{- end}} {{- end}}
return {{.Name}}Lenses{ return {{.Name}}Lenses{{.TypeParamNames}}{
// mandatory lenses
{{- range .Fields}} {{- range .Fields}}
{{- if .IsOptional}} {{.Name}}: lens{{.Name}},
{{.Name}}: L.MakeLens( {{- end}}
func(s {{$.Name}}) O.Option[{{.TypeName}}] { return iso{{.Name}}.Get(s.{{.Name}}) }, // optional lenses
func(s {{$.Name}}, v O.Option[{{.TypeName}}]) {{$.Name}} { s.{{.Name}} = iso{{.Name}}.ReverseGet(v); return s }, {{- range .Fields}}
), {{- if .IsComparable}}
{{- else}} {{.Name}}O: lens{{.Name}}O,
{{.Name}}: L.MakeLens(
func(s {{$.Name}}) {{.TypeName}} { return s.{{.Name}} },
func(s {{$.Name}}, v {{.TypeName}}) {{$.Name}} { s.{{.Name}} = v; return s },
),
{{- end}} {{- end}}
{{- end}} {{- end}}
} }
} }
// Make{{.Name}}RefLenses creates a new {{.Name}}RefLenses with lenses for all fields // Make{{.Name}}RefLenses creates a new {{.Name}}RefLenses with lenses for all fields
func Make{{.Name}}RefLenses() {{.Name}}RefLenses { func Make{{.Name}}RefLenses{{.TypeParams}}() {{.Name}}RefLenses{{.TypeParamNames}} {
return {{.Name}}RefLenses{ // mandatory lenses
{{- range .Fields}} {{- range .Fields}}
{{- if .IsOptional}}
{{- if .IsComparable}} {{- if .IsComparable}}
{{.Name}}: LO.FromIso[*{{$.Name}}](I.FromZero[{{.TypeName}}]())(L.MakeLensStrict( lens{{.Name}} := __lens.MakeLensStrictWithName(
func(s *{{$.Name}}) {{.TypeName}} { return s.{{.Name}} }, func(s *{{$.Name}}{{$.TypeParamNames}}) {{.TypeName}} { return s.{{.Name}} },
func(s *{{$.Name}}, v {{.TypeName}}) *{{$.Name}} { s.{{.Name}} = v; return s }, func(s *{{$.Name}}{{$.TypeParamNames}}, v {{.TypeName}}) *{{$.Name}}{{$.TypeParamNames}} { s.{{.Name}} = v; return s },
)), "(*{{$.Name}}{{$.TypeParamNames}}).{{.Name}}",
)
{{- else}} {{- else}}
{{.Name}}: LO.FromIso[*{{$.Name}}](I.FromZero[{{.TypeName}}]())(L.MakeLensRef( lens{{.Name}} := __lens.MakeLensRefWithName(
func(s *{{$.Name}}) {{.TypeName}} { return s.{{.Name}} }, func(s *{{$.Name}}{{$.TypeParamNames}}) {{.TypeName}} { return s.{{.Name}} },
func(s *{{$.Name}}, v {{.TypeName}}) *{{$.Name}} { s.{{.Name}} = v; return s }, func(s *{{$.Name}}{{$.TypeParamNames}}, v {{.TypeName}}) *{{$.Name}}{{$.TypeParamNames}} { s.{{.Name}} = v; return s },
)), "(*{{$.Name}}{{$.TypeParamNames}}).{{.Name}}",
)
{{- end}} {{- end}}
{{- else}} {{- end}}
// optional lenses
{{- range .Fields}}
{{- if .IsComparable}} {{- if .IsComparable}}
{{.Name}}: L.MakeLensStrict( lens{{.Name}}O := __lens_option.FromIso[*{{$.Name}}{{$.TypeParamNames}}](__iso_option.FromZero[{{.TypeName}}]())(lens{{.Name}})
func(s *{{$.Name}}) {{.TypeName}} { return s.{{.Name}} }, {{- end}}
func(s *{{$.Name}}, v {{.TypeName}}) *{{$.Name}} { s.{{.Name}} = v; return s }, {{- end}}
), return {{.Name}}RefLenses{{.TypeParamNames}}{
// mandatory lenses
{{- range .Fields}}
{{.Name}}: lens{{.Name}},
{{- end}}
// optional lenses
{{- range .Fields}}
{{- if .IsComparable}}
{{.Name}}O: lens{{.Name}}O,
{{- end}}
{{- end}}
}
}
// Make{{.Name}}Prisms creates a new {{.Name}}Prisms with prisms for all fields
func Make{{.Name}}Prisms{{.TypeParams}}() {{.Name}}Prisms{{.TypeParamNames}} {
{{- range .Fields}}
{{- if .IsComparable}}
_fromNonZero{{.Name}} := __option.FromNonZero[{{.TypeName}}]()
_prism{{.Name}} := __prism.MakePrismWithName(
func(s {{$.Name}}{{$.TypeParamNames}}) __option.Option[{{.TypeName}}] { return _fromNonZero{{.Name}}(s.{{.Name}}) },
func(v {{.TypeName}}) {{$.Name}}{{$.TypeParamNames}} {
{{- if .IsEmbedded}}
var result {{$.Name}}{{$.TypeParamNames}}
result.{{.Name}} = v
return result
{{- else}} {{- else}}
{{.Name}}: L.MakeLensRef( return {{$.Name}}{{$.TypeParamNames}}{ {{.Name}}: v }
func(s *{{$.Name}}) {{.TypeName}} { return s.{{.Name}} }, {{- end}}
func(s *{{$.Name}}, v {{.TypeName}}) *{{$.Name}} { s.{{.Name}} = v; return s }, },
), "{{$.Name}}{{$.TypeParamNames}}.{{.Name}}",
)
{{- else}}
_prism{{.Name}} := __prism.MakePrismWithName(
func(s {{$.Name}}{{$.TypeParamNames}}) __option.Option[{{.TypeName}}] { return __option.Some(s.{{.Name}}) },
func(v {{.TypeName}}) {{$.Name}}{{$.TypeParamNames}} {
{{- if .IsEmbedded}}
var result {{$.Name}}{{$.TypeParamNames}}
result.{{.Name}} = v
return result
{{- else}}
return {{$.Name}}{{$.TypeParamNames}}{ {{.Name}}: v }
{{- end}}
},
"{{$.Name}}{{$.TypeParamNames}}.{{.Name}}",
)
{{- end}} {{- end}}
{{- end}} {{- end}}
return {{.Name}}Prisms{{.TypeParamNames}} {
{{- range .Fields}}
{{.Name}}: _prism{{.Name}},
{{- end}} {{- end}}
} }
} }
@@ -280,9 +369,17 @@ func isPointerType(expr ast.Expr) bool {
// - Slices
// - Maps
// - Functions
-func isComparableType(expr ast.Expr) bool {
//
// typeParams is a map of type parameter names to their constraints (e.g., "T" -> "any", "K" -> "comparable")
func isComparableType(expr ast.Expr, typeParams map[string]string) bool {
switch t := expr.(type) {
case *ast.Ident:
// Check if this is a type parameter
if constraint, isTypeParam := typeParams[t.Name]; isTypeParam {
// Type parameter - check its constraint
return constraint == "comparable"
}
// Basic types and named types
// We assume named types are comparable unless they're known non-comparable types
name := t.Name
@@ -305,7 +402,7 @@ func isComparableType(expr ast.Expr) bool {
return false
}
// Fixed-size array, check element type
-return isComparableType(t.Elt)
return isComparableType(t.Elt, typeParams)
case *ast.MapType:
// Maps are not comparable
return false
@@ -362,6 +459,7 @@ func isComparableType(expr ast.Expr) bool {
}
}
// For other generic types, conservatively assume not comparable
log.Printf("Not comparable type: %v\n", t)
return false
case *ast.ChanType:
// Channel types are comparable
@@ -372,6 +470,146 @@ func isComparableType(expr ast.Expr) bool {
}
}
// embeddedFieldResult holds both the field info and its AST type for import extraction
type embeddedFieldResult struct {
fieldInfo fieldInfo
fieldType ast.Expr
}
// extractEmbeddedFields extracts fields from an embedded struct type
// It returns a slice of embeddedFieldResult for all exported fields in the embedded struct
// typeParamsMap contains the type parameters of the parent struct (for checking comparability)
func extractEmbeddedFields(embedType ast.Expr, fileImports map[string]string, file *ast.File, typeParamsMap map[string]string) []embeddedFieldResult {
var results []embeddedFieldResult
// Get the type name of the embedded field
var typeName string
var typeIdent *ast.Ident
switch t := embedType.(type) {
case *ast.Ident:
// Direct embedded type: type MyStruct struct { EmbeddedType }
typeName = t.Name
typeIdent = t
case *ast.StarExpr:
// Pointer embedded type: type MyStruct struct { *EmbeddedType }
if ident, ok := t.X.(*ast.Ident); ok {
typeName = ident.Name
typeIdent = ident
}
case *ast.SelectorExpr:
// Qualified embedded type: type MyStruct struct { pkg.EmbeddedType }
// We can't easily resolve this without full type information
// For now, skip these
return results
}
if S.IsEmpty(typeName) || typeIdent == nil {
return results
}
// Find the struct definition in the same file
var embeddedStructType *ast.StructType
ast.Inspect(file, func(n ast.Node) bool {
if ts, ok := n.(*ast.TypeSpec); ok {
if ts.Name.Name == typeName {
if st, ok := ts.Type.(*ast.StructType); ok {
embeddedStructType = st
return false
}
}
}
return true
})
if embeddedStructType == nil {
// Struct not found in this file, might be from another package
return results
}
// Extract fields from the embedded struct
for _, field := range embeddedStructType.Fields.List {
// Skip embedded fields within embedded structs (for now, to avoid infinite recursion)
if len(field.Names) == 0 {
continue
}
for _, name := range field.Names {
// Only export lenses for exported fields
if name.IsExported() {
fieldTypeName := getTypeName(field.Type)
isOptional := false
baseType := fieldTypeName
// Check if field is optional
if isPointerType(field.Type) {
isOptional = true
baseType = strings.TrimPrefix(fieldTypeName, "*")
} else if hasOmitEmpty(field.Tag) {
isOptional = true
}
// Check if the type is comparable
isComparable := isComparableType(field.Type, typeParamsMap)
results = append(results, embeddedFieldResult{
fieldInfo: fieldInfo{
Name: name.Name,
TypeName: fieldTypeName,
BaseType: baseType,
IsOptional: isOptional,
IsComparable: isComparable,
IsEmbedded: true,
},
fieldType: field.Type,
})
}
}
}
return results
}
// extractTypeParams extracts type parameters from a type spec
// Returns two strings: full params like "[T any]" and names only like "[T]"
func extractTypeParams(typeSpec *ast.TypeSpec) (string, string) {
if typeSpec.TypeParams == nil || len(typeSpec.TypeParams.List) == 0 {
return "", ""
}
var params []string
var names []string
for _, field := range typeSpec.TypeParams.List {
for _, name := range field.Names {
constraint := getTypeName(field.Type)
params = append(params, name.Name+" "+constraint)
names = append(names, name.Name)
}
}
fullParams := "[" + strings.Join(params, ", ") + "]"
nameParams := "[" + strings.Join(names, ", ") + "]"
return fullParams, nameParams
}
// buildTypeParamsMap creates a map of type parameter names to their constraints
// e.g., for "type Box[T any, K comparable]", returns {"T": "any", "K": "comparable"}
func buildTypeParamsMap(typeSpec *ast.TypeSpec) map[string]string {
typeParamsMap := make(map[string]string)
if typeSpec.TypeParams == nil || len(typeSpec.TypeParams.List) == 0 {
return typeParamsMap
}
for _, field := range typeSpec.TypeParams.List {
constraint := getTypeName(field.Type)
for _, name := range field.Names {
typeParamsMap[name.Name] = constraint
}
}
return typeParamsMap
}
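For orientation, the two helpers above only read ast.TypeSpec.TypeParams. A self-contained sketch (not part of the generator; sample type and file names are made up) of pulling the same information out of a parsed declaration:

package main
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
)
func main() {
src := `package p
type Pair[K comparable, V any] struct {
Key K
Value V
}`
fset := token.NewFileSet()
file, err := parser.ParseFile(fset, "sample.go", src, 0)
if err != nil {
panic(err)
}
ast.Inspect(file, func(n ast.Node) bool {
ts, ok := n.(*ast.TypeSpec)
if !ok || ts.TypeParams == nil {
return true
}
for _, f := range ts.TypeParams.List {
for _, name := range f.Names {
// for Pair this prints "K comparable" and "V any", i.e. the raw
// material for "[K comparable, V any]" and "[K, V]" in the templates
fmt.Printf("%s %s\n", name.Name, f.Type.(*ast.Ident).Name)
}
}
return false
})
}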
// parseFile parses a Go file and extracts structs with lens annotations // parseFile parses a Go file and extracts structs with lens annotations
func parseFile(filename string) ([]structInfo, string, error) { func parseFile(filename string) ([]structInfo, string, error) {
fset := token.NewFileSet() fset := token.NewFileSet()
@@ -435,9 +673,27 @@ func parseFile(filename string) ([]structInfo, string, error) {
var fields []fieldInfo var fields []fieldInfo
structImports := make(map[string]string) structImports := make(map[string]string)
// Build type parameters map for this struct
typeParamsMap := buildTypeParamsMap(typeSpec)
for _, field := range structType.Fields.List { for _, field := range structType.Fields.List {
if len(field.Names) == 0 { if len(field.Names) == 0 {
// Embedded field, skip for now // Embedded field - promote its fields
embeddedResults := extractEmbeddedFields(field.Type, fileImports, node, typeParamsMap)
for _, embResult := range embeddedResults {
// Extract imports from embedded field's type
fieldImports := make(map[string]string)
extractImports(embResult.fieldType, fieldImports)
// Resolve package names to full import paths
for pkgName := range fieldImports {
if importPath, ok := fileImports[pkgName]; ok {
structImports[importPath] = pkgName
}
}
fields = append(fields, embResult.fieldInfo)
}
continue continue
} }
for _, name := range field.Names { for _, name := range field.Names {
@@ -462,7 +718,8 @@ func parseFile(filename string) ([]structInfo, string, error) {
// Check if the type is comparable (for non-optional fields) // Check if the type is comparable (for non-optional fields)
// For optional fields, we don't need to check since they use LensO // For optional fields, we don't need to check since they use LensO
isComparable = isComparableType(field.Type) isComparable = isComparableType(field.Type, typeParamsMap)
// log.Printf("field %s, type: %v, isComparable: %b\n", name, field.Type, isComparable)
// Extract imports from this field's type // Extract imports from this field's type
fieldImports := make(map[string]string) fieldImports := make(map[string]string)
@@ -487,8 +744,11 @@ func parseFile(filename string) ([]structInfo, string, error) {
} }
if len(fields) > 0 { if len(fields) > 0 {
typeParams, typeParamNames := extractTypeParams(typeSpec)
structs = append(structs, structInfo{ structs = append(structs, structInfo{
Name: typeSpec.Name.Name, Name: typeSpec.Name.Name,
TypeParams: typeParams,
TypeParamNames: typeParamNames,
Fields: fields, Fields: fields,
Imports: structImports, Imports: structImports,
}) })
@@ -501,7 +761,7 @@ func parseFile(filename string) ([]structInfo, string, error) {
} }
// generateLensHelpers scans a directory for Go files and generates lens code // generateLensHelpers scans a directory for Go files and generates lens code
func generateLensHelpers(dir, filename string, verbose bool) error { func generateLensHelpers(dir, filename string, verbose, includeTestFiles bool) error {
// Get absolute path // Get absolute path
absDir, err := filepath.Abs(dir) absDir, err := filepath.Abs(dir)
if err != nil { if err != nil {
@@ -522,21 +782,34 @@ func generateLensHelpers(dir, filename string, verbose bool) error {
log.Printf("Found %d Go files", len(files)) log.Printf("Found %d Go files", len(files))
} }
// Parse all files and collect structs // Parse all files and collect structs, separating test and non-test files
var allStructs []structInfo var regularStructs []structInfo
var testStructs []structInfo
var packageName string var packageName string
for _, file := range files { for _, file := range files {
// Skip generated files and test files baseName := filepath.Base(file)
if strings.HasSuffix(file, "_test.go") || strings.Contains(file, "gen.go") {
// Skip generated lens files (both regular and test)
if strings.HasPrefix(baseName, "gen_lens") && strings.HasSuffix(baseName, ".go") {
if verbose { if verbose {
log.Printf("Skipping file: %s", filepath.Base(file)) log.Printf("Skipping generated lens file: %s", baseName)
}
continue
}
isTestFile := strings.HasSuffix(file, "_test.go")
// Skip test files unless includeTestFiles is true
if isTestFile && !includeTestFiles {
if verbose {
log.Printf("Skipping test file: %s", baseName)
} }
continue continue
} }
if verbose { if verbose {
log.Printf("Parsing file: %s", filepath.Base(file)) log.Printf("Parsing file: %s", baseName)
} }
structs, pkg, err := parseFile(file) structs, pkg, err := parseFile(file)
@@ -546,27 +819,52 @@ func generateLensHelpers(dir, filename string, verbose bool) error {
} }
if verbose && len(structs) > 0 { if verbose && len(structs) > 0 {
log.Printf("Found %d annotated struct(s) in %s", len(structs), filepath.Base(file)) log.Printf("Found %d annotated struct(s) in %s", len(structs), baseName)
for _, s := range structs { for _, s := range structs {
log.Printf(" - %s (%d fields)", s.Name, len(s.Fields)) log.Printf(" - %s (%d fields)", s.Name, len(s.Fields))
} }
} }
if packageName == "" { if S.IsEmpty(packageName) {
packageName = pkg packageName = pkg
} }
allStructs = append(allStructs, structs...) // Separate structs based on source file type
if isTestFile {
testStructs = append(testStructs, structs...)
} else {
regularStructs = append(regularStructs, structs...)
}
} }
if len(allStructs) == 0 { if len(regularStructs) == 0 && len(testStructs) == 0 {
log.Printf("No structs with %s annotation found in %s", lensAnnotation, absDir) log.Printf("No structs with %s annotation found in %s", lensAnnotation, absDir)
return nil return nil
} }
// Generate regular lens file if there are regular structs
if len(regularStructs) > 0 {
if err := generateLensFile(absDir, filename, packageName, regularStructs, verbose); err != nil {
return err
}
}
// Generate test lens file if there are test structs
if len(testStructs) > 0 {
testFilename := strings.TrimSuffix(filename, ".go") + "_test.go"
if err := generateLensFile(absDir, testFilename, packageName, testStructs, verbose); err != nil {
return err
}
}
return nil
}
// generateLensFile generates a lens file for the given structs
func generateLensFile(absDir, filename, packageName string, structs []structInfo, verbose bool) error {
// Collect all unique imports from all structs // Collect all unique imports from all structs
allImports := make(map[string]string) // import path -> alias allImports := make(map[string]string) // import path -> alias
for _, s := range allStructs { for _, s := range structs {
for importPath, alias := range s.Imports { for importPath, alias := range s.Imports {
allImports[importPath] = alias allImports[importPath] = alias
} }
@@ -580,7 +878,7 @@ func generateLensHelpers(dir, filename string, verbose bool) error {
} }
defer f.Close() defer f.Close()
log.Printf("Generating lens code in [%s] for package [%s] with [%d] structs ...", outPath, packageName, len(allStructs)) log.Printf("Generating lens code in [%s] for package [%s] with [%d] structs ...", outPath, packageName, len(structs))
// Write header // Write header
writePackage(f, packageName) writePackage(f, packageName)
@@ -588,10 +886,11 @@ func generateLensHelpers(dir, filename string, verbose bool) error {
// Write imports // Write imports
f.WriteString("import (\n") f.WriteString("import (\n")
// Standard fp-go imports always needed // Standard fp-go imports always needed
f.WriteString("\tL \"github.com/IBM/fp-go/v2/optics/lens\"\n") f.WriteString("\t__lens \"github.com/IBM/fp-go/v2/optics/lens\"\n")
f.WriteString("\tLO \"github.com/IBM/fp-go/v2/optics/lens/option\"\n") f.WriteString("\t__option \"github.com/IBM/fp-go/v2/option\"\n")
f.WriteString("\tO \"github.com/IBM/fp-go/v2/option\"\n") f.WriteString("\t__prism \"github.com/IBM/fp-go/v2/optics/prism\"\n")
f.WriteString("\tI \"github.com/IBM/fp-go/v2/optics/iso/option\"\n") f.WriteString("\t__lens_option \"github.com/IBM/fp-go/v2/optics/lens/option\"\n")
f.WriteString("\t__iso_option \"github.com/IBM/fp-go/v2/optics/iso/option\"\n")
// Add additional imports collected from field types // Add additional imports collected from field types
for importPath, alias := range allImports { for importPath, alias := range allImports {
@@ -601,7 +900,7 @@ func generateLensHelpers(dir, filename string, verbose bool) error {
f.WriteString(")\n") f.WriteString(")\n")
// Generate lens code for each struct using templates // Generate lens code for each struct using templates
for _, s := range allStructs { for _, s := range structs {
var buf bytes.Buffer var buf bytes.Buffer
// Generate struct type // Generate struct type
@@ -633,12 +932,14 @@ func LensCommand() *C.Command {
flagLensDir, flagLensDir,
flagFilename, flagFilename,
flagVerbose, flagVerbose,
flagIncludeTestFiles,
}, },
Action: func(ctx *C.Context) error { Action: func(ctx *C.Context) error {
return generateLensHelpers( return generateLensHelpers(
ctx.String(keyLensDir), ctx.String(keyLensDir),
ctx.String(keyFilename), ctx.String(keyFilename),
ctx.Bool(keyVerbose), ctx.Bool(keyVerbose),
ctx.Bool(keyIncludeTestFile),
) )
}, },
} }


@@ -25,6 +25,7 @@ import (
"strings" "strings"
"testing" "testing"
S "github.com/IBM/fp-go/v2/string"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@@ -60,7 +61,7 @@ func TestHasLensAnnotation(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var doc *ast.CommentGroup var doc *ast.CommentGroup
if tt.comment != "" { if S.IsNonEmpty(tt.comment) {
doc = &ast.CommentGroup{ doc = &ast.CommentGroup{
List: []*ast.Comment{ List: []*ast.Comment{
{Text: tt.comment}, {Text: tt.comment},
@@ -247,7 +248,7 @@ func TestIsComparableType(t *testing.T) {
}) })
require.NotNil(t, fieldType) require.NotNil(t, fieldType)
result := isComparableType(fieldType) result := isComparableType(fieldType, map[string]string{})
assert.Equal(t, tt.expected, result) assert.Equal(t, tt.expected, result)
}) })
} }
@@ -289,7 +290,7 @@ func TestHasOmitEmpty(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
var tag *ast.BasicLit var tag *ast.BasicLit
if tt.tag != "" { if S.IsNonEmpty(tt.tag) {
tag = &ast.BasicLit{ tag = &ast.BasicLit{
Value: tt.tag, Value: tt.tag,
} }
@@ -326,7 +327,7 @@ type Other struct {
} }
` `
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Parse the file // Parse the file
@@ -380,7 +381,7 @@ type Config struct {
} }
` `
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Parse the file // Parse the file
@@ -440,7 +441,7 @@ type TypeTest struct {
} }
` `
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Parse the file // Parse the file
@@ -514,16 +515,16 @@ func TestLensRefTemplatesWithComparable(t *testing.T) {
assert.Contains(t, constructorStr, "func MakeTestStructRefLenses() TestStructRefLenses") assert.Contains(t, constructorStr, "func MakeTestStructRefLenses() TestStructRefLenses")
// Name field - comparable, should use MakeLensStrict // Name field - comparable, should use MakeLensStrict
assert.Contains(t, constructorStr, "Name: L.MakeLensStrict(", assert.Contains(t, constructorStr, "lensName := __lens.MakeLensStrictWithName(",
"comparable field Name should use MakeLensStrict in RefLenses") "comparable field Name should use MakeLensStrictWithName in RefLenses")
// Age field - comparable, should use MakeLensStrict // Age field - comparable, should use MakeLensStrict
assert.Contains(t, constructorStr, "Age: L.MakeLensStrict(", assert.Contains(t, constructorStr, "lensAge := __lens.MakeLensStrictWithName(",
"comparable field Age should use MakeLensStrict in RefLenses") "comparable field Age should use MakeLensStrictWithName in RefLenses")
// Data field - not comparable, should use MakeLensRef // Data field - not comparable, should use MakeLensRef
assert.Contains(t, constructorStr, "Data: L.MakeLensRef(", assert.Contains(t, constructorStr, "lensData := __lens.MakeLensRefWithName(",
"non-comparable field Data should use MakeLensRef in RefLenses") "non-comparable field Data should use MakeLensRefWithName in RefLenses")
} }
@@ -542,12 +543,12 @@ type TestStruct struct {
` `
testFile := filepath.Join(tmpDir, "test.go") testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Generate lens code // Generate lens code
outputFile := "gen.go" outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false) err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err) require.NoError(t, err)
// Verify the generated file exists // Verify the generated file exists
@@ -564,23 +565,23 @@ type TestStruct struct {
// Check for expected content in RefLenses // Check for expected content in RefLenses
assert.Contains(t, contentStr, "MakeTestStructRefLenses") assert.Contains(t, contentStr, "MakeTestStructRefLenses")
// Name and Count are comparable, should use MakeLensStrict // Name and Count are comparable, should use MakeLensStrictWithName
assert.Contains(t, contentStr, "L.MakeLensStrict", assert.Contains(t, contentStr, "__lens.MakeLensStrictWithName",
"comparable fields should use MakeLensStrict in RefLenses") "comparable fields should use MakeLensStrictWithName in RefLenses")
// Data is not comparable (slice), should use MakeLensRef // Data is not comparable (slice), should use MakeLensRefWithName
assert.Contains(t, contentStr, "L.MakeLensRef", assert.Contains(t, contentStr, "__lens.MakeLensRefWithName",
"non-comparable fields should use MakeLensRef in RefLenses") "non-comparable fields should use MakeLensRefWithName in RefLenses")
// Verify the pattern appears for Name field (comparable) // Verify the pattern appears for Name field (comparable)
namePattern := "Name: L.MakeLensStrict(" namePattern := "lensName := __lens.MakeLensStrictWithName("
assert.Contains(t, contentStr, namePattern, assert.Contains(t, contentStr, namePattern,
"Name field should use MakeLensStrict") "Name field should use MakeLensStrictWithName")
// Verify the pattern appears for Data field (not comparable) // Verify the pattern appears for Data field (not comparable)
dataPattern := "Data: L.MakeLensRef(" dataPattern := "lensData := __lens.MakeLensRefWithName("
assert.Contains(t, contentStr, dataPattern, assert.Contains(t, contentStr, dataPattern,
"Data field should use MakeLensRef") "Data field should use MakeLensRefWithName")
} }
func TestGenerateLensHelpers(t *testing.T) { func TestGenerateLensHelpers(t *testing.T) {
@@ -597,12 +598,12 @@ type TestStruct struct {
` `
testFile := filepath.Join(tmpDir, "test.go") testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Generate lens code // Generate lens code
outputFile := "gen.go" outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false) err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err) require.NoError(t, err)
// Verify the generated file exists // Verify the generated file exists
@@ -619,11 +620,11 @@ type TestStruct struct {
// Check for expected content // Check for expected content
assert.Contains(t, contentStr, "package testpkg") assert.Contains(t, contentStr, "package testpkg")
assert.Contains(t, contentStr, "Code generated by go generate") assert.Contains(t, contentStr, "Code generated by go generate")
assert.Contains(t, contentStr, "TestStructLens") assert.Contains(t, contentStr, "TestStructLenses")
assert.Contains(t, contentStr, "MakeTestStructLens") assert.Contains(t, contentStr, "MakeTestStructLenses")
assert.Contains(t, contentStr, "L.Lens[TestStruct, string]") assert.Contains(t, contentStr, "__lens.Lens[TestStruct, string]")
assert.Contains(t, contentStr, "LO.LensO[TestStruct, *int]") assert.Contains(t, contentStr, "__lens_option.LensO[TestStruct, *int]")
assert.Contains(t, contentStr, "I.FromZero") assert.Contains(t, contentStr, "__iso_option.FromZero")
} }
func TestGenerateLensHelpersNoAnnotations(t *testing.T) { func TestGenerateLensHelpersNoAnnotations(t *testing.T) {
@@ -639,12 +640,12 @@ type TestStruct struct {
` `
testFile := filepath.Join(tmpDir, "test.go") testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0644) err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err) require.NoError(t, err)
// Generate lens code (should not create file) // Generate lens code (should not create file)
outputFile := "gen.go" outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false) err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err) require.NoError(t, err)
// Verify the generated file does not exist // Verify the generated file does not exist
@@ -657,8 +658,8 @@ func TestLensTemplates(t *testing.T) {
s := structInfo{ s := structInfo{
Name: "TestStruct", Name: "TestStruct",
Fields: []fieldInfo{ Fields: []fieldInfo{
{Name: "Name", TypeName: "string", IsOptional: false}, {Name: "Name", TypeName: "string", IsOptional: false, IsComparable: true},
{Name: "Value", TypeName: "*int", IsOptional: true}, {Name: "Value", TypeName: "*int", IsOptional: true, IsComparable: true},
}, },
} }
@@ -669,8 +670,10 @@ func TestLensTemplates(t *testing.T) {
structStr := structBuf.String() structStr := structBuf.String()
assert.Contains(t, structStr, "type TestStructLenses struct") assert.Contains(t, structStr, "type TestStructLenses struct")
assert.Contains(t, structStr, "Name L.Lens[TestStruct, string]") assert.Contains(t, structStr, "Name __lens.Lens[TestStruct, string]")
assert.Contains(t, structStr, "Value LO.LensO[TestStruct, *int]") assert.Contains(t, structStr, "NameO __lens_option.LensO[TestStruct, string]")
assert.Contains(t, structStr, "Value __lens.Lens[TestStruct, *int]")
assert.Contains(t, structStr, "ValueO __lens_option.LensO[TestStruct, *int]")
// Test constructor template // Test constructor template
var constructorBuf bytes.Buffer var constructorBuf bytes.Buffer
@@ -680,19 +683,21 @@ func TestLensTemplates(t *testing.T) {
constructorStr := constructorBuf.String() constructorStr := constructorBuf.String()
assert.Contains(t, constructorStr, "func MakeTestStructLenses() TestStructLenses") assert.Contains(t, constructorStr, "func MakeTestStructLenses() TestStructLenses")
assert.Contains(t, constructorStr, "return TestStructLenses{") assert.Contains(t, constructorStr, "return TestStructLenses{")
assert.Contains(t, constructorStr, "Name: L.MakeLens(") assert.Contains(t, constructorStr, "Name: lensName,")
assert.Contains(t, constructorStr, "Value: L.MakeLens(") assert.Contains(t, constructorStr, "NameO: lensNameO,")
assert.Contains(t, constructorStr, "I.FromZero") assert.Contains(t, constructorStr, "Value: lensValue,")
assert.Contains(t, constructorStr, "ValueO: lensValueO,")
assert.Contains(t, constructorStr, "__iso_option.FromZero")
} }
func TestLensTemplatesWithOmitEmpty(t *testing.T) { func TestLensTemplatesWithOmitEmpty(t *testing.T) {
s := structInfo{ s := structInfo{
Name: "ConfigStruct", Name: "ConfigStruct",
Fields: []fieldInfo{ Fields: []fieldInfo{
{Name: "Name", TypeName: "string", IsOptional: false}, {Name: "Name", TypeName: "string", IsOptional: false, IsComparable: true},
{Name: "Value", TypeName: "string", IsOptional: true}, // non-pointer with omitempty {Name: "Value", TypeName: "string", IsOptional: true, IsComparable: true}, // non-pointer with omitempty
{Name: "Count", TypeName: "int", IsOptional: true}, // non-pointer with omitempty {Name: "Count", TypeName: "int", IsOptional: true, IsComparable: true}, // non-pointer with omitempty
{Name: "Pointer", TypeName: "*string", IsOptional: true}, // pointer {Name: "Pointer", TypeName: "*string", IsOptional: true, IsComparable: true}, // pointer
}, },
} }
@@ -703,10 +708,14 @@ func TestLensTemplatesWithOmitEmpty(t *testing.T) {
structStr := structBuf.String() structStr := structBuf.String()
assert.Contains(t, structStr, "type ConfigStructLenses struct") assert.Contains(t, structStr, "type ConfigStructLenses struct")
assert.Contains(t, structStr, "Name L.Lens[ConfigStruct, string]") assert.Contains(t, structStr, "Name __lens.Lens[ConfigStruct, string]")
assert.Contains(t, structStr, "Value LO.LensO[ConfigStruct, string]", "non-pointer with omitempty should use LensO") assert.Contains(t, structStr, "NameO __lens_option.LensO[ConfigStruct, string]")
assert.Contains(t, structStr, "Count LO.LensO[ConfigStruct, int]", "non-pointer with omitempty should use LensO") assert.Contains(t, structStr, "Value __lens.Lens[ConfigStruct, string]")
assert.Contains(t, structStr, "Pointer LO.LensO[ConfigStruct, *string]") assert.Contains(t, structStr, "ValueO __lens_option.LensO[ConfigStruct, string]", "comparable non-pointer with omitempty should have optional lens")
assert.Contains(t, structStr, "Count __lens.Lens[ConfigStruct, int]")
assert.Contains(t, structStr, "CountO __lens_option.LensO[ConfigStruct, int]", "comparable non-pointer with omitempty should have optional lens")
assert.Contains(t, structStr, "Pointer __lens.Lens[ConfigStruct, *string]")
assert.Contains(t, structStr, "PointerO __lens_option.LensO[ConfigStruct, *string]")
// Test constructor template // Test constructor template
var constructorBuf bytes.Buffer var constructorBuf bytes.Buffer
@@ -715,9 +724,9 @@ func TestLensTemplatesWithOmitEmpty(t *testing.T) {
constructorStr := constructorBuf.String() constructorStr := constructorBuf.String()
assert.Contains(t, constructorStr, "func MakeConfigStructLenses() ConfigStructLenses") assert.Contains(t, constructorStr, "func MakeConfigStructLenses() ConfigStructLenses")
assert.Contains(t, constructorStr, "isoValue := I.FromZero[string]()") assert.Contains(t, constructorStr, "__iso_option.FromZero[string]()")
assert.Contains(t, constructorStr, "isoCount := I.FromZero[int]()") assert.Contains(t, constructorStr, "__iso_option.FromZero[int]()")
assert.Contains(t, constructorStr, "isoPointer := I.FromZero[*string]()") assert.Contains(t, constructorStr, "__iso_option.FromZero[*string]()")
} }
func TestLensCommandFlags(t *testing.T) { func TestLensCommandFlags(t *testing.T) {
@@ -726,12 +735,12 @@ func TestLensCommandFlags(t *testing.T) {
assert.Equal(t, "lens", cmd.Name) assert.Equal(t, "lens", cmd.Name)
assert.Equal(t, "generate lens code for annotated structs", cmd.Usage) assert.Equal(t, "generate lens code for annotated structs", cmd.Usage)
assert.Contains(t, strings.ToLower(cmd.Description), "fp-go:lens") assert.Contains(t, strings.ToLower(cmd.Description), "fp-go:lens")
assert.Contains(t, strings.ToLower(cmd.Description), "lenso") assert.Contains(t, strings.ToLower(cmd.Description), "lenso", "Description should mention LensO for optional lenses")
// Check flags // Check flags
assert.Len(t, cmd.Flags, 3) assert.Len(t, cmd.Flags, 4)
var hasDir, hasFilename, hasVerbose bool var hasDir, hasFilename, hasVerbose, hasIncludeTestFiles bool
for _, flag := range cmd.Flags { for _, flag := range cmd.Flags {
switch flag.Names()[0] { switch flag.Names()[0] {
case "dir": case "dir":
@@ -740,10 +749,340 @@ func TestLensCommandFlags(t *testing.T) {
hasFilename = true hasFilename = true
case "verbose": case "verbose":
hasVerbose = true hasVerbose = true
case "include-test-files":
hasIncludeTestFiles = true
} }
} }
assert.True(t, hasDir, "should have dir flag") assert.True(t, hasDir, "should have dir flag")
assert.True(t, hasFilename, "should have filename flag") assert.True(t, hasFilename, "should have filename flag")
assert.True(t, hasVerbose, "should have verbose flag") assert.True(t, hasVerbose, "should have verbose flag")
assert.True(t, hasIncludeTestFiles, "should have include-test-files flag")
}
func TestParseFileWithEmbeddedStruct(t *testing.T) {
// Create a temporary test file
tmpDir := t.TempDir()
testFile := filepath.Join(tmpDir, "test.go")
testCode := `package testpkg
// Base struct to be embedded
type Base struct {
ID int
Name string
}
// fp-go:Lens
type Extended struct {
Base
Extra string
}
`
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Parse the file
structs, pkg, err := parseFile(testFile)
require.NoError(t, err)
// Verify results
assert.Equal(t, "testpkg", pkg)
assert.Len(t, structs, 1)
// Check Extended struct
extended := structs[0]
assert.Equal(t, "Extended", extended.Name)
assert.Len(t, extended.Fields, 3, "Should have 3 fields: ID, Name (from Base), and Extra")
// Check that embedded fields are promoted
fieldNames := make(map[string]bool)
for _, field := range extended.Fields {
fieldNames[field.Name] = true
}
assert.True(t, fieldNames["ID"], "Should have promoted ID field from Base")
assert.True(t, fieldNames["Name"], "Should have promoted Name field from Base")
assert.True(t, fieldNames["Extra"], "Should have Extra field")
}
func TestGenerateLensHelpersWithEmbeddedStruct(t *testing.T) {
// Create a temporary directory with test files
tmpDir := t.TempDir()
testCode := `package testpkg
// Base struct to be embedded
type Address struct {
Street string
City string
}
// fp-go:Lens
type Person struct {
Address
Name string
Age int
}
`
testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Generate lens code
outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err)
// Verify the generated file exists
genPath := filepath.Join(tmpDir, outputFile)
_, err = os.Stat(genPath)
require.NoError(t, err)
// Read and verify the generated content
content, err := os.ReadFile(genPath)
require.NoError(t, err)
contentStr := string(content)
// Check for expected content
assert.Contains(t, contentStr, "package testpkg")
assert.Contains(t, contentStr, "PersonLenses")
assert.Contains(t, contentStr, "MakePersonLenses")
// Check that embedded fields are included
assert.Contains(t, contentStr, "Street __lens.Lens[Person, string]", "Should have lens for embedded Street field")
assert.Contains(t, contentStr, "City __lens.Lens[Person, string]", "Should have lens for embedded City field")
assert.Contains(t, contentStr, "Name __lens.Lens[Person, string]", "Should have lens for Name field")
assert.Contains(t, contentStr, "Age __lens.Lens[Person, int]", "Should have lens for Age field")
// Check that optional lenses are also generated for embedded fields
assert.Contains(t, contentStr, "StreetO __lens_option.LensO[Person, string]")
assert.Contains(t, contentStr, "CityO __lens_option.LensO[Person, string]")
}
func TestParseFileWithPointerEmbeddedStruct(t *testing.T) {
// Create a temporary test file
tmpDir := t.TempDir()
testFile := filepath.Join(tmpDir, "test.go")
testCode := `package testpkg
// Base struct to be embedded
type Metadata struct {
CreatedAt string
UpdatedAt string
}
// fp-go:Lens
type Document struct {
*Metadata
Title string
Content string
}
`
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Parse the file
structs, pkg, err := parseFile(testFile)
require.NoError(t, err)
// Verify results
assert.Equal(t, "testpkg", pkg)
assert.Len(t, structs, 1)
// Check Document struct
doc := structs[0]
assert.Equal(t, "Document", doc.Name)
assert.Len(t, doc.Fields, 4, "Should have 4 fields: CreatedAt, UpdatedAt (from *Metadata), Title, and Content")
// Check that embedded fields are promoted
fieldNames := make(map[string]bool)
for _, field := range doc.Fields {
fieldNames[field.Name] = true
}
assert.True(t, fieldNames["CreatedAt"], "Should have promoted CreatedAt field from *Metadata")
assert.True(t, fieldNames["UpdatedAt"], "Should have promoted UpdatedAt field from *Metadata")
assert.True(t, fieldNames["Title"], "Should have Title field")
assert.True(t, fieldNames["Content"], "Should have Content field")
}
func TestParseFileWithGenericStruct(t *testing.T) {
// Create a temporary test file
tmpDir := t.TempDir()
testFile := filepath.Join(tmpDir, "test.go")
testCode := `package testpkg
// fp-go:Lens
type Container[T any] struct {
Value T
Count int
}
`
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Parse the file
structs, pkg, err := parseFile(testFile)
require.NoError(t, err)
// Verify results
assert.Equal(t, "testpkg", pkg)
assert.Len(t, structs, 1)
// Check Container struct
container := structs[0]
assert.Equal(t, "Container", container.Name)
assert.Equal(t, "[T any]", container.TypeParams, "Should have type parameter [T any]")
assert.Len(t, container.Fields, 2)
assert.Equal(t, "Value", container.Fields[0].Name)
assert.Equal(t, "T", container.Fields[0].TypeName)
assert.Equal(t, "Count", container.Fields[1].Name)
assert.Equal(t, "int", container.Fields[1].TypeName)
}
func TestParseFileWithMultipleTypeParams(t *testing.T) {
// Create a temporary test file
tmpDir := t.TempDir()
testFile := filepath.Join(tmpDir, "test.go")
testCode := `package testpkg
// fp-go:Lens
type Pair[K comparable, V any] struct {
Key K
Value V
}
`
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Parse the file
structs, pkg, err := parseFile(testFile)
require.NoError(t, err)
// Verify results
assert.Equal(t, "testpkg", pkg)
assert.Len(t, structs, 1)
// Check Pair struct
pair := structs[0]
assert.Equal(t, "Pair", pair.Name)
assert.Equal(t, "[K comparable, V any]", pair.TypeParams, "Should have type parameters [K comparable, V any]")
assert.Len(t, pair.Fields, 2)
assert.Equal(t, "Key", pair.Fields[0].Name)
assert.Equal(t, "K", pair.Fields[0].TypeName)
assert.Equal(t, "Value", pair.Fields[1].Name)
assert.Equal(t, "V", pair.Fields[1].TypeName)
}
func TestGenerateLensHelpersWithGenericStruct(t *testing.T) {
// Create a temporary directory with test files
tmpDir := t.TempDir()
testCode := `package testpkg
// fp-go:Lens
type Box[T any] struct {
Content T
Label string
}
`
testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Generate lens code
outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err)
// Verify the generated file exists
genPath := filepath.Join(tmpDir, outputFile)
_, err = os.Stat(genPath)
require.NoError(t, err)
// Read and verify the generated content
content, err := os.ReadFile(genPath)
require.NoError(t, err)
contentStr := string(content)
// Check for expected content with type parameters
assert.Contains(t, contentStr, "package testpkg")
assert.Contains(t, contentStr, "type BoxLenses[T any] struct", "Should have generic BoxLenses type")
assert.Contains(t, contentStr, "type BoxRefLenses[T any] struct", "Should have generic BoxRefLenses type")
assert.Contains(t, contentStr, "func MakeBoxLenses[T any]() BoxLenses[T]", "Should have generic constructor")
assert.Contains(t, contentStr, "func MakeBoxRefLenses[T any]() BoxRefLenses[T]", "Should have generic ref constructor")
// Check that fields use the generic type parameter
assert.Contains(t, contentStr, "Content __lens.Lens[Box[T], T]", "Should have lens for generic Content field")
assert.Contains(t, contentStr, "Label __lens.Lens[Box[T], string]", "Should have lens for Label field")
// Check optional lenses - only for comparable types
// T any is not comparable, so ContentO should NOT be generated
assert.NotContains(t, contentStr, "ContentO __lens_option.LensO[Box[T], T]", "T any is not comparable, should not have optional lens")
// string is comparable, so LabelO should be generated
assert.Contains(t, contentStr, "LabelO __lens_option.LensO[Box[T], string]", "string is comparable, should have optional lens")
}
func TestGenerateLensHelpersWithComparableTypeParam(t *testing.T) {
// Create a temporary directory with test files
tmpDir := t.TempDir()
testCode := `package testpkg
// fp-go:Lens
type ComparableBox[T comparable] struct {
Key T
Value string
}
`
testFile := filepath.Join(tmpDir, "test.go")
err := os.WriteFile(testFile, []byte(testCode), 0o644)
require.NoError(t, err)
// Generate lens code
outputFile := "gen.go"
err = generateLensHelpers(tmpDir, outputFile, false, false)
require.NoError(t, err)
// Verify the generated file exists
genPath := filepath.Join(tmpDir, outputFile)
_, err = os.Stat(genPath)
require.NoError(t, err)
// Read and verify the generated content
content, err := os.ReadFile(genPath)
require.NoError(t, err)
contentStr := string(content)
// Check for expected content with type parameters
assert.Contains(t, contentStr, "package testpkg")
assert.Contains(t, contentStr, "type ComparableBoxLenses[T comparable] struct", "Should have generic ComparableBoxLenses type")
assert.Contains(t, contentStr, "type ComparableBoxRefLenses[T comparable] struct", "Should have generic ComparableBoxRefLenses type")
// Check that Key field (with comparable constraint) uses MakeLensStrict in RefLenses
assert.Contains(t, contentStr, "lensKey := __lens.MakeLensStrictWithName(", "Key field with comparable constraint should use MakeLensStrictWithName")
// Check that Value field (string, always comparable) also uses MakeLensStrict
assert.Contains(t, contentStr, "lensValue := __lens.MakeLensStrictWithName(", "Value field (string) should use MakeLensStrictWithName")
// Verify that MakeLensRef is NOT used (since both fields are comparable)
assert.NotContains(t, contentStr, "__lens.MakeLensRefWithName(", "Should not use MakeLensRefWithName when all fields are comparable")
} }


@@ -19,6 +19,8 @@ import (
"fmt" "fmt"
"os" "os"
"strings" "strings"
S "github.com/IBM/fp-go/v2/string"
) )
// Deprecated: // Deprecated:
@@ -176,7 +178,7 @@ func generateTraverseTuple1(
} }
fmt.Fprintf(f, "F%d ~func(A%d) %s", j+1, j+1, hkt(fmt.Sprintf("T%d", j+1))) fmt.Fprintf(f, "F%d ~func(A%d) %s", j+1, j+1, hkt(fmt.Sprintf("T%d", j+1)))
} }
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, ", %s", infix) fmt.Fprintf(f, ", %s", infix)
} }
// types // types
@@ -209,7 +211,7 @@ func generateTraverseTuple1(
fmt.Fprintf(f, " return A.TraverseTuple%d(\n", i) fmt.Fprintf(f, " return A.TraverseTuple%d(\n", i)
// map // map
fmt.Fprintf(f, " Map[") fmt.Fprintf(f, " Map[")
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, "%s, T1,", infix) fmt.Fprintf(f, "%s, T1,", infix)
} else { } else {
fmt.Fprintf(f, "T1,") fmt.Fprintf(f, "T1,")
@@ -231,7 +233,7 @@ func generateTraverseTuple1(
fmt.Fprintf(f, " ") fmt.Fprintf(f, " ")
} }
fmt.Fprintf(f, "%s", tuple) fmt.Fprintf(f, "%s", tuple)
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, ", %s", infix) fmt.Fprintf(f, ", %s", infix)
} }
fmt.Fprintf(f, ", T%d],\n", j+1) fmt.Fprintf(f, ", T%d],\n", j+1)
@@ -256,11 +258,11 @@ func generateSequenceTuple1(
fmt.Fprintf(f, "\n// SequenceTuple%d converts a [Tuple%d] of [%s] into an [%s].\n", i, i, hkt("T"), hkt(fmt.Sprintf("Tuple%d", i))) fmt.Fprintf(f, "\n// SequenceTuple%d converts a [Tuple%d] of [%s] into an [%s].\n", i, i, hkt("T"), hkt(fmt.Sprintf("Tuple%d", i)))
fmt.Fprintf(f, "func SequenceTuple%d[", i) fmt.Fprintf(f, "func SequenceTuple%d[", i)
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, "%s", infix) fmt.Fprintf(f, "%s", infix)
} }
for j := 0; j < i; j++ { for j := 0; j < i; j++ {
if infix != "" || j > 0 { if S.IsNonEmpty(infix) || j > 0 {
fmt.Fprintf(f, ", ") fmt.Fprintf(f, ", ")
} }
fmt.Fprintf(f, "T%d", j+1) fmt.Fprintf(f, "T%d", j+1)
@@ -276,7 +278,7 @@ func generateSequenceTuple1(
fmt.Fprintf(f, " return A.SequenceTuple%d(\n", i) fmt.Fprintf(f, " return A.SequenceTuple%d(\n", i)
// map // map
fmt.Fprintf(f, " Map[") fmt.Fprintf(f, " Map[")
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, "%s, T1,", infix) fmt.Fprintf(f, "%s, T1,", infix)
} else { } else {
fmt.Fprintf(f, "T1,") fmt.Fprintf(f, "T1,")
@@ -298,7 +300,7 @@ func generateSequenceTuple1(
fmt.Fprintf(f, " ") fmt.Fprintf(f, " ")
} }
fmt.Fprintf(f, "%s", tuple) fmt.Fprintf(f, "%s", tuple)
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, ", %s", infix) fmt.Fprintf(f, ", %s", infix)
} }
fmt.Fprintf(f, ", T%d],\n", j+1) fmt.Fprintf(f, ", T%d],\n", j+1)
@@ -319,11 +321,11 @@ func generateSequenceT1(
fmt.Fprintf(f, "\n// SequenceT%d converts %d parameters of [%s] into a [%s].\n", i, i, hkt("T"), hkt(fmt.Sprintf("Tuple%d", i))) fmt.Fprintf(f, "\n// SequenceT%d converts %d parameters of [%s] into a [%s].\n", i, i, hkt("T"), hkt(fmt.Sprintf("Tuple%d", i)))
fmt.Fprintf(f, "func SequenceT%d[", i) fmt.Fprintf(f, "func SequenceT%d[", i)
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, "%s", infix) fmt.Fprintf(f, "%s", infix)
} }
for j := 0; j < i; j++ { for j := 0; j < i; j++ {
if infix != "" || j > 0 { if S.IsNonEmpty(infix) || j > 0 {
fmt.Fprintf(f, ", ") fmt.Fprintf(f, ", ")
} }
fmt.Fprintf(f, "T%d", j+1) fmt.Fprintf(f, "T%d", j+1)
@@ -339,7 +341,7 @@ func generateSequenceT1(
fmt.Fprintf(f, " return A.SequenceT%d(\n", i) fmt.Fprintf(f, " return A.SequenceT%d(\n", i)
// map // map
fmt.Fprintf(f, " Map[") fmt.Fprintf(f, " Map[")
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, "%s, T1,", infix) fmt.Fprintf(f, "%s, T1,", infix)
} else { } else {
fmt.Fprintf(f, "T1,") fmt.Fprintf(f, "T1,")
@@ -361,7 +363,7 @@ func generateSequenceT1(
fmt.Fprintf(f, " ") fmt.Fprintf(f, " ")
} }
fmt.Fprintf(f, "%s", tuple) fmt.Fprintf(f, "%s", tuple)
if infix != "" { if S.IsNonEmpty(infix) {
fmt.Fprintf(f, ", %s", infix) fmt.Fprintf(f, ", %s", infix)
} }
fmt.Fprintf(f, ", T%d],\n", j+1) fmt.Fprintf(f, ", T%d],\n", j+1)

v2/constant/monoid.go Normal file

@@ -0,0 +1,11 @@
package constant
import (
"github.com/IBM/fp-go/v2/function"
M "github.com/IBM/fp-go/v2/monoid"
)
// Monoid returns a [M.Monoid] that returns a constant value in all operations
func Monoid[A any](a A) M.Monoid[A] {
return M.MakeMonoid(function.Constant2[A, A](a), a)
}

v2/consumer/consumer.go Normal file

@@ -0,0 +1,177 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package consumer
// Local transforms a Consumer by preprocessing its input through a function.
// This is the contravariant map operation for Consumers, analogous to reader.Local:
// both adapt the input side of a computation, in contrast to a covariant Map,
// which would transform the output side.
//
// Given a Consumer[R1] that consumes values of type R1, and a function f that
// converts R2 to R1, Local creates a new Consumer[R2] that:
// 1. Takes a value of type R2
// 2. Applies f to convert it to R1
// 3. Passes the result to the original Consumer[R1]
//
// This is particularly useful for adapting consumers to work with different input types,
// similar to how reader.Local adapts readers to work with different environment types.
//
// Comparison with reader.Local:
// - reader.Local: Transforms the environment BEFORE passing it to a Reader (preprocessing input)
// - consumer.Local: Transforms the value BEFORE passing it to a Consumer (preprocessing input)
// - Both are contravariant operations on the input type
// - Reader produces output, Consumer performs side effects
//
// Type Parameters:
// - R2: The input type of the new Consumer (what you have)
// - R1: The input type of the original Consumer (what it expects)
//
// Parameters:
// - f: A function that converts R2 to R1 (preprocessing function)
//
// Returns:
// - An Operator that transforms Consumer[R1] into Consumer[R2]
//
// Example - Basic type adaptation:
//
// // Consumer that logs integers
// logInt := func(x int) {
// fmt.Printf("Value: %d\n", x)
// }
//
// // Adapt it to consume strings by parsing them first
// parseToInt := func(s string) int {
// n, _ := strconv.Atoi(s)
// return n
// }
//
// logString := consumer.Local(parseToInt)(logInt)
// logString("42") // Logs: "Value: 42"
//
// Example - Extracting fields from structs:
//
// type User struct {
// Name string
// Age int
// }
//
// // Consumer that logs names
// logName := func(name string) {
// fmt.Printf("Name: %s\n", name)
// }
//
// // Adapt it to consume User structs
// extractName := func(u User) string {
// return u.Name
// }
//
// logUser := consumer.Local(extractName)(logName)
// logUser(User{Name: "Alice", Age: 30}) // Logs: "Name: Alice"
//
// Example - Simplifying complex types:
//
// type DetailedConfig struct {
// Host string
// Port int
// Timeout time.Duration
// MaxRetry int
// }
//
// type SimpleConfig struct {
// Host string
// Port int
// }
//
// // Consumer that logs simple configs
// logSimple := func(c SimpleConfig) {
// fmt.Printf("Server: %s:%d\n", c.Host, c.Port)
// }
//
// // Adapt it to consume detailed configs
// simplify := func(d DetailedConfig) SimpleConfig {
// return SimpleConfig{Host: d.Host, Port: d.Port}
// }
//
// logDetailed := consumer.Local(simplify)(logSimple)
// logDetailed(DetailedConfig{
// Host: "localhost",
// Port: 8080,
// Timeout: time.Second,
// MaxRetry: 3,
// }) // Logs: "Server: localhost:8080"
//
// Example - Composing multiple transformations:
//
// type Response struct {
// StatusCode int
// Body string
// }
//
// // Consumer that logs status codes
// logStatus := func(code int) {
// fmt.Printf("Status: %d\n", code)
// }
//
// // Extract status code from response
// getStatus := func(r Response) int {
// return r.StatusCode
// }
//
// // Adapt to consume responses
// logResponse := consumer.Local(getStatus)(logStatus)
// logResponse(Response{StatusCode: 200, Body: "OK"}) // Logs: "Status: 200"
//
// Example - Using with multiple consumers:
//
// type Event struct {
// Type string
// Timestamp time.Time
// Data map[string]any
// }
//
// // Consumers for different aspects
// logType := func(t string) { fmt.Printf("Type: %s\n", t) }
// logTime := func(t time.Time) { fmt.Printf("Time: %v\n", t) }
//
// // Adapt them to consume events
// logEventType := consumer.Local(func(e Event) string { return e.Type })(logType)
// logEventTime := consumer.Local(func(e Event) time.Time { return e.Timestamp })(logTime)
//
// event := Event{Type: "UserLogin", Timestamp: time.Now(), Data: nil}
// logEventType(event) // Logs: "Type: UserLogin"
// logEventTime(event) // Logs: "Time: ..."
//
// Use Cases:
// - Type adaptation: Convert between different input types
// - Field extraction: Extract specific fields from complex structures
// - Data transformation: Preprocess data before consumption
// - Interface adaptation: Adapt consumers to work with different interfaces
// - Logging pipelines: Transform data before logging
// - Event handling: Extract relevant data from events before processing
//
// Relationship to Reader:
// Consumer is the dual of Reader in category theory:
// - Reader[R, A] = R -> A (produces output from environment)
// - Consumer[A] = A -> () (consumes input, produces side effects)
// - reader.Local transforms the environment before reading
// - consumer.Local transforms the input before consuming
// - Both are contravariant functors on their input type
func Local[R2, R1 any](f func(R2) R1) Operator[R1, R2] {
return func(c Consumer[R1]) Consumer[R2] {
return func(r2 R2) {
c(f(r2))
}
}
}


@@ -0,0 +1,383 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package consumer
import (
"strconv"
"testing"
"time"
"github.com/IBM/fp-go/v2/function"
"github.com/stretchr/testify/assert"
)
func TestLocal(t *testing.T) {
t.Run("basic type transformation", func(t *testing.T) {
var captured int
consumeInt := func(x int) {
captured = x
}
// Transform string to int before consuming
stringToInt := func(s string) int {
n, _ := strconv.Atoi(s)
return n
}
consumeString := Local(stringToInt)(consumeInt)
consumeString("42")
assert.Equal(t, 42, captured)
})
t.Run("field extraction from struct", func(t *testing.T) {
type User struct {
Name string
Age int
}
var capturedName string
consumeName := func(name string) {
capturedName = name
}
extractName := func(u User) string {
return u.Name
}
consumeUser := Local(extractName)(consumeName)
consumeUser(User{Name: "Alice", Age: 30})
assert.Equal(t, "Alice", capturedName)
})
t.Run("simplifying complex types", func(t *testing.T) {
type DetailedConfig struct {
Host string
Port int
Timeout time.Duration
MaxRetry int
}
type SimpleConfig struct {
Host string
Port int
}
var captured SimpleConfig
consumeSimple := func(c SimpleConfig) {
captured = c
}
simplify := func(d DetailedConfig) SimpleConfig {
return SimpleConfig{Host: d.Host, Port: d.Port}
}
consumeDetailed := Local(simplify)(consumeSimple)
consumeDetailed(DetailedConfig{
Host: "localhost",
Port: 8080,
Timeout: time.Second,
MaxRetry: 3,
})
assert.Equal(t, SimpleConfig{Host: "localhost", Port: 8080}, captured)
})
t.Run("multiple transformations", func(t *testing.T) {
type Response struct {
StatusCode int
Body string
}
var capturedStatus int
consumeStatus := func(code int) {
capturedStatus = code
}
getStatus := func(r Response) int {
return r.StatusCode
}
consumeResponse := Local(getStatus)(consumeStatus)
consumeResponse(Response{StatusCode: 200, Body: "OK"})
assert.Equal(t, 200, capturedStatus)
})
t.Run("chaining Local transformations", func(t *testing.T) {
type Level3 struct{ Value int }
type Level2 struct{ L3 Level3 }
type Level1 struct{ L2 Level2 }
var captured int
consumeInt := func(x int) {
captured = x
}
// Chain multiple Local transformations
extract3 := func(l3 Level3) int { return l3.Value }
extract2 := func(l2 Level2) Level3 { return l2.L3 }
extract1 := func(l1 Level1) Level2 { return l1.L2 }
// Compose the transformations
consumeLevel3 := Local(extract3)(consumeInt)
consumeLevel2 := Local(extract2)(consumeLevel3)
consumeLevel1 := Local(extract1)(consumeLevel2)
consumeLevel1(Level1{L2: Level2{L3: Level3{Value: 42}}})
assert.Equal(t, 42, captured)
})
t.Run("identity transformation", func(t *testing.T) {
var captured string
consumeString := func(s string) {
captured = s
}
identity := function.Identity[string]
consumeIdentity := Local(identity)(consumeString)
consumeIdentity("test")
assert.Equal(t, "test", captured)
})
t.Run("transformation with calculation", func(t *testing.T) {
type Rectangle struct {
Width int
Height int
}
var capturedArea int
consumeArea := func(area int) {
capturedArea = area
}
calculateArea := func(r Rectangle) int {
return r.Width * r.Height
}
consumeRectangle := Local(calculateArea)(consumeArea)
consumeRectangle(Rectangle{Width: 5, Height: 10})
assert.Equal(t, 50, capturedArea)
})
t.Run("multiple consumers with same transformation", func(t *testing.T) {
type Event struct {
Type string
Timestamp time.Time
}
var capturedType string
var capturedTime time.Time
consumeType := func(t string) {
capturedType = t
}
consumeTime := func(t time.Time) {
capturedTime = t
}
extractType := func(e Event) string { return e.Type }
extractTime := func(e Event) time.Time { return e.Timestamp }
consumeEventType := Local(extractType)(consumeType)
consumeEventTime := Local(extractTime)(consumeTime)
now := time.Now()
event := Event{Type: "UserLogin", Timestamp: now}
consumeEventType(event)
consumeEventTime(event)
assert.Equal(t, "UserLogin", capturedType)
assert.Equal(t, now, capturedTime)
})
t.Run("transformation with slice", func(t *testing.T) {
var captured int
consumeLength := func(n int) {
captured = n
}
getLength := func(s []string) int {
return len(s)
}
consumeSlice := Local(getLength)(consumeLength)
consumeSlice([]string{"a", "b", "c"})
assert.Equal(t, 3, captured)
})
t.Run("transformation with map", func(t *testing.T) {
var captured int
consumeCount := func(n int) {
captured = n
}
getCount := func(m map[string]int) int {
return len(m)
}
consumeMap := Local(getCount)(consumeCount)
consumeMap(map[string]int{"a": 1, "b": 2, "c": 3})
assert.Equal(t, 3, captured)
})
t.Run("transformation with pointer", func(t *testing.T) {
var captured int
consumeInt := func(x int) {
captured = x
}
dereference := func(p *int) int {
if p == nil {
return 0
}
return *p
}
consumePointer := Local(dereference)(consumeInt)
value := 42
consumePointer(&value)
assert.Equal(t, 42, captured)
consumePointer(nil)
assert.Equal(t, 0, captured)
})
t.Run("transformation with custom type", func(t *testing.T) {
type MyType struct {
Value string
}
var captured string
consumeString := func(s string) {
captured = s
}
extractValue := func(m MyType) string {
return m.Value
}
consumeMyType := Local(extractValue)(consumeString)
consumeMyType(MyType{Value: "test"})
assert.Equal(t, "test", captured)
})
t.Run("accumulation through multiple calls", func(t *testing.T) {
var sum int
accumulate := func(x int) {
sum += x
}
double := func(x int) int {
return x * 2
}
accumulateDoubled := Local(double)(accumulate)
accumulateDoubled(1)
accumulateDoubled(2)
accumulateDoubled(3)
assert.Equal(t, 12, sum) // (1*2) + (2*2) + (3*2) = 2 + 4 + 6 = 12
})
t.Run("transformation with error handling", func(t *testing.T) {
type Result struct {
Value int
Error error
}
var captured int
consumeInt := func(x int) {
captured = x
}
extractValue := func(r Result) int {
if r.Error != nil {
return -1
}
return r.Value
}
consumeResult := Local(extractValue)(consumeInt)
consumeResult(Result{Value: 42, Error: nil})
assert.Equal(t, 42, captured)
consumeResult(Result{Value: 100, Error: assert.AnError})
assert.Equal(t, -1, captured)
})
t.Run("transformation preserves consumer behavior", func(t *testing.T) {
callCount := 0
consumer := func(x int) {
callCount++
}
transform := func(s string) int {
n, _ := strconv.Atoi(s)
return n
}
transformedConsumer := Local(transform)(consumer)
transformedConsumer("1")
transformedConsumer("2")
transformedConsumer("3")
assert.Equal(t, 3, callCount)
})
t.Run("comparison with reader.Local behavior", func(t *testing.T) {
// This test demonstrates the dual nature of Consumer and Reader
// Consumer: transforms input before consumption (contravariant)
// Reader: transforms environment before reading (also contravariant on input)
type DetailedEnv struct {
Value int
Extra string
}
type SimpleEnv struct {
Value int
}
var captured int
consumeSimple := func(e SimpleEnv) {
captured = e.Value
}
simplify := func(d DetailedEnv) SimpleEnv {
return SimpleEnv{Value: d.Value}
}
consumeDetailed := Local(simplify)(consumeSimple)
consumeDetailed(DetailedEnv{Value: 42, Extra: "ignored"})
assert.Equal(t, 42, captured)
})
}

v2/consumer/types.go Normal file

@@ -0,0 +1,56 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package consumer provides types and utilities for functions that consume values without returning results.
//
// A Consumer represents a side-effecting operation that accepts a value but produces no output.
// This is useful for operations like logging, printing, updating state, or any action where
// the return value is not needed.
package consumer
type (
// Consumer represents a function that accepts a value of type A and performs a side effect.
// It does not return any value, making it useful for operations where only the side effect matters,
// such as logging, printing, or updating external state.
//
// This is a fundamental concept in functional programming for handling side effects in a
// controlled manner. Consumers can be composed, chained, or used in higher-order functions
// to build complex side-effecting behaviors.
//
// Type Parameters:
// - A: The type of value consumed by the function
//
// Example:
//
// // A simple consumer that prints values
// var printInt Consumer[int] = func(x int) {
// fmt.Println(x)
// }
// printInt(42) // Prints: 42
//
// // A consumer that logs messages
// var logger Consumer[string] = func(msg string) {
// log.Println(msg)
// }
// logger("Hello, World!") // Logs: Hello, World!
//
// // Consumers can be used in functional pipelines
// var saveToDatabase Consumer[User] = func(user User) {
// db.Save(user)
// }
Consumer[A any] = func(A)
Operator[A, B any] = func(Consumer[A]) Consumer[B]
)


@@ -24,8 +24,8 @@ import (
// withContext wraps an existing IOEither and performs a context check for cancellation before delegating // withContext wraps an existing IOEither and performs a context check for cancellation before delegating
func WithContext[A any](ctx context.Context, ma IOResult[A]) IOResult[A] { func WithContext[A any](ctx context.Context, ma IOResult[A]) IOResult[A] {
return func() Result[A] { return func() Result[A] {
if err := context.Cause(ctx); err != nil { if ctx.Err() != nil {
return result.Left[A](err) return result.Left[A](context.Cause(ctx))
} }
return ma() return ma()
} }


@@ -0,0 +1,16 @@
package readerio
import (
RIO "github.com/IBM/fp-go/v2/readerio"
)
//go:inline
func Bracket[
A, B, ANY any](
acquire ReaderIO[A],
use Kleisli[A, B],
release func(A, B) ReaderIO[ANY],
) ReaderIO[B] {
return RIO.Bracket(acquire, use, release)
}
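
A minimal sketch of the bracket pattern, written as if it lived alongside this package (so the ReaderIO, Kleisli and IO aliases resolve locally); release runs after use has produced its result:

package readerio

import (
	"context"
	"fmt"
	"log"
)

func ExampleBracket() {
	// acquire a handle, compute a result from it, and always release it afterwards
	acquire := Of("db-handle")

	use := func(h string) ReaderIO[int] {
		return Of(len(h))
	}

	release := func(h string, n int) ReaderIO[struct{}] {
		return FromIO(func() struct{} {
			log.Printf("releasing %s after producing %d", h, n)
			return struct{}{}
		})
	}

	program := Bracket(acquire, use, release)
	fmt.Println(program(context.Background())()) // logs the release, prints 9
}
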


@@ -0,0 +1,13 @@
package readerio
import "github.com/IBM/fp-go/v2/io"
//go:inline
func ChainConsumer[A any](c Consumer[A]) Operator[A, struct{}] {
return ChainIOK(io.FromConsumerK(c))
}
//go:inline
func ChainFirstConsumer[A any](c Consumer[A]) Operator[A, A] {
return ChainFirstIOK(io.FromConsumerK(c))
}
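
A minimal sketch, assuming Consumer is the func(A) alias from the consumer package and that the snippet sits alongside this package:

package readerio

import (
	"context"
	"fmt"
	"log"
)

func ExampleChainFirstConsumer() {
	// feed each value to a Consumer for its side effect, keep the value itself
	logValue := func(n int) { log.Printf("got %d", n) }

	program := ChainFirstConsumer(logValue)(Of(42))

	fmt.Println(program(context.Background())()) // logs "got 42", prints 42
}
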


@@ -0,0 +1,20 @@
package readerio
import (
"context"
"github.com/IBM/fp-go/v2/reader"
RIO "github.com/IBM/fp-go/v2/readerio"
)
//go:inline
func SequenceReader[R, A any](ma ReaderIO[Reader[R, A]]) Reader[R, ReaderIO[A]] {
return RIO.SequenceReader(ma)
}
//go:inline
func TraverseReader[R, A, B any](
f reader.Kleisli[R, A, B],
) func(ReaderIO[A]) Kleisli[R, B] {
return RIO.TraverseReader[context.Context](f)
}
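
A minimal sketch of TraverseReader, assuming reader.Reader and reader.Kleisli are the usual function aliases; the config dependency of the inner reader is pulled to the outside of the ReaderIO:

package readerio

import (
	"context"
	"fmt"

	"github.com/IBM/fp-go/v2/reader"
)

type config struct{ Prefix string }

func ExampleTraverseReader() {
	// decorating a string needs a config: a reader.Kleisli from string to string
	decorate := func(s string) reader.Reader[config, string] {
		return func(c config) string { return c.Prefix + s }
	}

	// TraverseReader pushes the config dependency outside of the ReaderIO
	withConfig := TraverseReader(decorate)(Of("hello"))

	rio := withConfig(config{Prefix: ">> "})
	fmt.Println(rio(context.Background())()) // ">> hello"
}
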


@@ -0,0 +1,29 @@
package readerio
import (
"context"
"log/slog"
"github.com/IBM/fp-go/v2/logging"
)
func SLogWithCallback[A any](
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
message string) Kleisli[A, A] {
return func(a A) ReaderIO[A] {
return func(ctx context.Context) IO[A] {
// resolve the logger for this invocation from the context
logger := cb(ctx)
return func() A {
logger.LogAttrs(ctx, logLevel, message, slog.Any("value", a))
return a
}
}
}
}
//go:inline
func SLog[A any](message string) Kleisli[A, A] {
return SLogWithCallback[A](slog.LevelInfo, logging.GetLoggerFromContext, message)
}
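
A minimal sketch of SLog in a pipeline; the logger is resolved from the context through logging.GetLoggerFromContext, and the traced value flows on unchanged:

package readerio

import (
	"context"
	"fmt"

	"github.com/IBM/fp-go/v2/function"
)

func ExampleSLog() {
	program := function.Pipe2(
		Of(42),
		Chain(SLog[int]("intermediate value")),
		Map(func(n int) int { return n * 2 }),
	)

	fmt.Println(program(context.Background())()) // logs value=42, prints 84
}
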


@@ -0,0 +1,769 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"context"
"time"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/reader"
RIO "github.com/IBM/fp-go/v2/readerio"
)
const (
// useParallel is the feature flag that controls whether Ap uses the parallel or the sequential implementation
useParallel = true
)
// MonadMap transforms the success value of a [ReaderIO] using the provided function.
// If the computation fails, the error is propagated unchanged.
//
// Parameters:
// - fa: The ReaderIO to transform
// - f: The transformation function
//
// Returns a new ReaderIO with the transformed value.
//
//go:inline
func MonadMap[A, B any](fa ReaderIO[A], f func(A) B) ReaderIO[B] {
return RIO.MonadMap(fa, f)
}
// Map transforms the success value of a [ReaderIO] using the provided function.
// This is the curried version of [MonadMap], useful for composition.
//
// Parameters:
// - f: The transformation function
//
// Returns a function that transforms a ReaderIO.
//
//go:inline
func Map[A, B any](f func(A) B) Operator[A, B] {
return RIO.Map[context.Context](f)
}
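
A minimal usage sketch:

package readerio

import (
	"context"
	"fmt"
)

func ExampleMap() {
	double := Map(func(n int) int { return n * 2 })

	fmt.Println(double(Of(21))(context.Background())()) // 42
}
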
// MonadMapTo replaces the success value of a [ReaderIO] with a constant value.
// If the computation fails, the error is propagated unchanged.
//
// Parameters:
// - fa: The ReaderIO to transform
// - b: The constant value to use
//
// Returns a new ReaderIO with the constant value.
//
//go:inline
func MonadMapTo[A, B any](fa ReaderIO[A], b B) ReaderIO[B] {
return RIO.MonadMapTo(fa, b)
}
// MapTo replaces the success value of a [ReaderIO] with a constant value.
// This is the curried version of [MonadMapTo].
//
// Parameters:
// - b: The constant value to use
//
// Returns a function that transforms a ReaderIO.
//
//go:inline
func MapTo[A, B any](b B) Operator[A, B] {
return RIO.MapTo[context.Context, A](b)
}
// MonadChain sequences two [ReaderIO] computations, where the second depends on the result of the first.
// If the first computation fails, the second is not executed.
//
// Parameters:
// - ma: The first ReaderIO
// - f: Function that produces the second ReaderIO based on the first's result
//
// Returns a new ReaderIO representing the sequenced computation.
//
//go:inline
func MonadChain[A, B any](ma ReaderIO[A], f Kleisli[A, B]) ReaderIO[B] {
return RIO.MonadChain(ma, f)
}
// Chain sequences two [ReaderIO] computations, where the second depends on the result of the first.
// This is the curried version of [MonadChain], useful for composition.
//
// Parameters:
// - f: Function that produces the second ReaderIO based on the first's result
//
// Returns a function that sequences ReaderIO computations.
//
//go:inline
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B] {
return RIO.Chain(f)
}
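
A minimal sketch where the second computation depends on the result of the first:

package readerio

import (
	"context"
	"fmt"
)

func ExampleChain() {
	length := func(s string) ReaderIO[int] {
		return Of(len(s))
	}

	program := Chain(length)(Of("hello"))

	fmt.Println(program(context.Background())()) // 5
}
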
// MonadChainFirst sequences two [ReaderIO] computations but returns the result of the first.
// The second computation is executed for its side effects only.
//
// Parameters:
// - ma: The first ReaderIO
// - f: Function that produces the second ReaderIO
//
// Returns a ReaderIO with the result of the first computation.
//
//go:inline
func MonadChainFirst[A, B any](ma ReaderIO[A], f Kleisli[A, B]) ReaderIO[A] {
return RIO.MonadChainFirst(ma, f)
}
// MonadTap executes a side-effect computation but returns the original value.
// This is an alias for [MonadChainFirst] and is useful for operations like logging
// or validation that should not affect the main computation flow.
//
// Parameters:
// - ma: The ReaderIO to tap
// - f: Function that produces a side-effect ReaderIO
//
// Returns a ReaderIO with the original value after executing the side effect.
//
//go:inline
func MonadTap[A, B any](ma ReaderIO[A], f Kleisli[A, B]) ReaderIO[A] {
return RIO.MonadTap(ma, f)
}
// ChainFirst sequences two [ReaderIO] computations but returns the result of the first.
// This is the curried version of [MonadChainFirst].
//
// Parameters:
// - f: Function that produces the second ReaderIO
//
// Returns a function that sequences ReaderIO computations.
//
//go:inline
func ChainFirst[A, B any](f Kleisli[A, B]) Operator[A, A] {
return RIO.ChainFirst(f)
}
// Tap executes a side-effect computation but returns the original value.
// This is the curried version of [MonadTap], an alias for [ChainFirst].
//
// Parameters:
// - f: Function that produces a side-effect ReaderIO
//
// Returns a function that taps ReaderIO computations.
//
//go:inline
func Tap[A, B any](f Kleisli[A, B]) Operator[A, A] {
return RIO.Tap(f)
}
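
A minimal sketch of Tap, assuming IO[A] is the usual func() A alias so the literal passed to FromIO type-checks:

package readerio

import (
	"context"
	"fmt"
	"log"
)

func ExampleTap() {
	// run a side effect derived from the value, then continue with the value itself
	audit := func(n int) ReaderIO[struct{}] {
		return FromIO(func() struct{} {
			log.Printf("audit: %d", n)
			return struct{}{}
		})
	}

	program := Tap(audit)(Of(7))

	fmt.Println(program(context.Background())()) // logs "audit: 7", prints 7
}
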
// Of creates a [ReaderIO] that always succeeds with the given value.
// It represents the monadic return (pure) operation.
//
// Parameters:
// - a: The value to wrap
//
// Returns a ReaderIO that always succeeds with the given value.
//
//go:inline
func Of[A any](a A) ReaderIO[A] {
return RIO.Of[context.Context](a)
}
// MonadApPar implements parallel applicative application for [ReaderIO].
// It executes the function and value computations in parallel where possible,
// potentially improving performance for independent operations.
//
// Parameters:
// - fab: ReaderIO containing a function
// - fa: ReaderIO containing a value
//
// Returns a ReaderIO with the function applied to the value.
//
//go:inline
func MonadApPar[B, A any](fab ReaderIO[func(A) B], fa ReaderIO[A]) ReaderIO[B] {
return RIO.MonadApPar(fab, fa)
}
// MonadAp implements applicative application for [ReaderIO].
// By default, it uses parallel execution ([MonadApPar]) but can be configured to use
// sequential execution ([MonadApSeq]) via the useParallel constant.
//
// Parameters:
// - fab: ReaderIO containing a function
// - fa: ReaderIO containing a value
//
// Returns a ReaderIO with the function applied to the value.
//
//go:inline
func MonadAp[B, A any](fab ReaderIO[func(A) B], fa ReaderIO[A]) ReaderIO[B] {
// dispatch to the configured version
if useParallel {
return MonadApPar(fab, fa)
}
return MonadApSeq(fab, fa)
}
// MonadApSeq implements sequential applicative application for [ReaderIO].
// It executes the function computation first, then the value computation.
//
// Parameters:
// - fab: ReaderIO containing a function
// - fa: ReaderIO containing a value
//
// Returns a ReaderIO with the function applied to the value.
//
//go:inline
func MonadApSeq[B, A any](fab ReaderIO[func(A) B], fa ReaderIO[A]) ReaderIO[B] {
return RIO.MonadApSeq(fab, fa)
}
// Ap applies a function wrapped in a [ReaderIO] to a value wrapped in a ReaderIO.
// This is the curried version of [MonadAp], using the default execution mode.
//
// Parameters:
// - fa: ReaderIO containing a value
//
// Returns a function that applies a ReaderIO function to the value.
//
//go:inline
func Ap[B, A any](fa ReaderIO[A]) Operator[func(A) B, B] {
return RIO.Ap[B](fa)
}
// ApSeq applies a function wrapped in a [ReaderIO] to a value sequentially.
// This is the curried version of [MonadApSeq].
//
// Parameters:
// - fa: ReaderIO containing a value
//
// Returns a function that applies a ReaderIO function to the value sequentially.
//
//go:inline
func ApSeq[B, A any](fa ReaderIO[A]) Operator[func(A) B, B] {
return function.Bind2nd(MonadApSeq[B, A], fa)
}
// ApPar applies a function wrapped in a [ReaderIO] to a value in parallel.
// This is the curried version of [MonadApPar].
//
// Parameters:
// - fa: ReaderIO containing a value
//
// Returns a function that applies a ReaderIO function to the value in parallel.
//
//go:inline
func ApPar[B, A any](fa ReaderIO[A]) Operator[func(A) B, B] {
return function.Bind2nd(MonadApPar[B, A], fa)
}
// Ask returns a [ReaderIO] that provides access to the context.
// This is useful for accessing the [context.Context] within a computation.
//
// Returns a ReaderIO that produces the context.
//
//go:inline
func Ask() ReaderIO[context.Context] {
return RIO.Ask[context.Context]()
}
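// A minimal usage sketch, reading a value from the context (F is assumed to be
// an alias for github.com/IBM/fp-go/v2/function; the key type is illustrative):
//
//	type userKey struct{}
//	userName := F.Pipe1(
//		Ask(),
//		Map(func(ctx context.Context) string {
//			if v, ok := ctx.Value(userKey{}).(string); ok {
//				return v
//			}
//			return "unknown"
//		}),
//	)
//	name := userName(context.WithValue(context.Background(), userKey{}, "Alice"))() // "Alice"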
// FromIO converts an [IO] into a [ReaderIO].
// The resulting computation ignores the context and simply runs the IO.
//
// Parameters:
// - t: The IO to convert
//
// Returns a ReaderIO that executes the IO when run.
//
//go:inline
func FromIO[A any](t IO[A]) ReaderIO[A] {
return RIO.FromIO[context.Context](t)
}
// FromReader converts a [Reader] into a [ReaderIO].
// The Reader computation is lifted into the IO context, allowing it to be
// composed with other ReaderIO operations.
//
// Parameters:
// - t: The Reader to convert
//
// Returns a ReaderIO that executes the Reader and wraps the result in IO.
//
//go:inline
func FromReader[A any](t Reader[context.Context, A]) ReaderIO[A] {
return RIO.FromReader(t)
}
// FromLazy converts a [Lazy] computation into a [ReaderIO].
// This is an alias for [FromIO] since Lazy and IO share the same underlying structure.
//
// Parameters:
// - t: The Lazy computation to convert
//
// Returns a ReaderIO that executes the Lazy computation when run.
//
//go:inline
func FromLazy[A any](t Lazy[A]) ReaderIO[A] {
return RIO.FromIO[context.Context](t)
}
// MonadChainIOK chains a function that returns an [IO] into a [ReaderIO] computation.
// The resulting IO is lifted into the ReaderIO context before being sequenced.
//
// Parameters:
// - ma: The ReaderIO to chain from
// - f: Function that produces an IO
//
// Returns a new ReaderIO with the chained IO computation.
//
//go:inline
func MonadChainIOK[A, B any](ma ReaderIO[A], f func(A) IO[B]) ReaderIO[B] {
return RIO.MonadChainIOK(ma, f)
}
// ChainIOK chains a function that returns an [IO] into a [ReaderIO] computation.
// This is the curried version of [MonadChainIOK].
//
// Parameters:
// - f: Function that produces an IO
//
// Returns a function that chains the IO-returning function.
//
//go:inline
func ChainIOK[A, B any](f func(A) IO[B]) Operator[A, B] {
return RIO.ChainIOK[context.Context](f)
}
// MonadChainFirstIOK chains a function that returns an [IO] but keeps the original value.
// The IO computation is executed for its side effects only.
//
// Parameters:
// - ma: The ReaderIO to chain from
// - f: Function that produces an IO
//
// Returns a ReaderIO with the original value after executing the IO.
//
//go:inline
func MonadChainFirstIOK[A, B any](ma ReaderIO[A], f func(A) IO[B]) ReaderIO[A] {
return RIO.MonadChainFirstIOK(ma, f)
}
// MonadTapIOK chains a function that returns an [IO] but keeps the original value.
// This is an alias for [MonadChainFirstIOK] and is useful for side effects like logging.
//
// Parameters:
// - ma: The ReaderIO to tap
// - f: Function that produces an IO for side effects
//
// Returns a ReaderIO with the original value after executing the IO.
//
//go:inline
func MonadTapIOK[A, B any](ma ReaderIO[A], f func(A) IO[B]) ReaderIO[A] {
return RIO.MonadTapIOK(ma, f)
}
// ChainFirstIOK chains a function that returns an [IO] but keeps the original value.
// This is the curried version of [MonadChainFirstIOK].
//
// Parameters:
// - f: Function that produces an IO
//
// Returns a function that chains the IO-returning function.
//
//go:inline
func ChainFirstIOK[A, B any](f func(A) IO[B]) Operator[A, A] {
return RIO.ChainFirstIOK[context.Context](f)
}
// TapIOK chains a function that returns an [IO] but keeps the original value.
// This is the curried version of [MonadTapIOK], an alias for [ChainFirstIOK].
//
// Parameters:
// - f: Function that produces an IO for side effects
//
// Returns a function that taps with IO-returning functions.
//
//go:inline
func TapIOK[A, B any](f func(A) IO[B]) Operator[A, A] {
return RIO.TapIOK[context.Context](f)
}
// Defer creates a [ReaderIO] by lazily generating a new computation each time it's executed.
// This is useful for creating computations that should be re-evaluated on each execution.
//
// Parameters:
// - gen: Lazy generator function that produces a ReaderIO
//
// Returns a ReaderIO that generates a fresh computation on each execution.
//
//go:inline
func Defer[A any](gen Lazy[ReaderIO[A]]) ReaderIO[A] {
return RIO.Defer(gen)
}
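// A minimal illustration, mirroring the package tests: the generator runs on
// every execution, so each run observes a freshly created computation.
//
//	counter := 0
//	fresh := Defer(func() ReaderIO[int] {
//		counter++
//		return Of(counter)
//	})
//	first := fresh(context.Background())()  // 1
//	second := fresh(context.Background())() // 2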
// Memoize computes the value of the provided [ReaderIO] monad lazily but exactly once.
// The context used to compute the value is the context of the first call, so do not use this
// method if the value has a functional dependency on the content of the context.
//
// Parameters:
// - rdr: The ReaderIO to memoize
//
// Returns a ReaderIO that caches its result after the first execution.
//
//go:inline
func Memoize[A any](rdr ReaderIO[A]) ReaderIO[A] {
return RIO.Memoize(rdr)
}
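// A minimal illustration, mirroring the package tests: the mapped function below
// is evaluated only once, even though the memoized ReaderIO is executed twice.
//
//	counter := 0
//	memo := Memoize(MonadMap(Of(0), func(int) int {
//		counter++
//		return counter
//	}))
//	first := memo(context.Background())()  // 1
//	second := memo(context.Background())() // still 1, served from the cache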
// Flatten converts a nested [ReaderIO] into a flat [ReaderIO].
// This is equivalent to [MonadChain] with the identity function.
//
// Parameters:
// - rdr: The nested ReaderIO to flatten
//
// Returns a flattened ReaderIO.
//
//go:inline
func Flatten[A any](rdr ReaderIO[ReaderIO[A]]) ReaderIO[A] {
return RIO.Flatten(rdr)
}
// MonadFlap applies a value to a function wrapped in a [ReaderIO].
// Unlike [MonadAp], the argument is a plain value rather than a value wrapped in a ReaderIO.
//
// Parameters:
// - fab: ReaderIO containing a function
// - a: The value to apply to the function
//
// Returns a ReaderIO with the function applied to the value.
//
//go:inline
func MonadFlap[B, A any](fab ReaderIO[func(A) B], a A) ReaderIO[B] {
return RIO.MonadFlap(fab, a)
}
// Flap applies a value to a function wrapped in a [ReaderIO].
// This is the curried version of [MonadFlap].
//
// Parameters:
// - a: The value to apply to the function
//
// Returns a function that applies the value to a ReaderIO function.
//
//go:inline
func Flap[B, A any](a A) Operator[func(A) B, B] {
return RIO.Flap[context.Context, B](a)
}
// MonadChainReaderK chains a [ReaderIO] with a function that returns a [Reader].
// The Reader is lifted into the ReaderIO context, allowing composition of
// Reader and ReaderIO operations.
//
// Parameters:
// - ma: The ReaderIO to chain from
// - f: Function that produces a Reader
//
// Returns a new ReaderIO with the chained Reader computation.
//
//go:inline
func MonadChainReaderK[A, B any](ma ReaderIO[A], f reader.Kleisli[context.Context, A, B]) ReaderIO[B] {
return RIO.MonadChainReaderK(ma, f)
}
// ChainReaderK chains a [ReaderIO] with a function that returns a [Reader].
// This is the curried version of [MonadChainReaderK].
//
// Parameters:
// - f: Function that produces a Reader
//
// Returns a function that chains Reader-returning functions.
//
//go:inline
func ChainReaderK[A, B any](f reader.Kleisli[context.Context, A, B]) Operator[A, B] {
return RIO.ChainReaderK(f)
}
// MonadChainFirstReaderK chains a function that returns a [Reader] but keeps the original value.
// The Reader computation is executed and its result is discarded.
//
// Parameters:
// - ma: The ReaderIO to chain from
// - f: Function that produces a Reader
//
// Returns a ReaderIO with the original value after executing the Reader.
//
//go:inline
func MonadChainFirstReaderK[A, B any](ma ReaderIO[A], f reader.Kleisli[context.Context, A, B]) ReaderIO[A] {
return RIO.MonadChainFirstReaderK(ma, f)
}
// MonadTapReaderK chains a function that returns a [Reader] but keeps the original value.
// This is an alias for [MonadChainFirstReaderK] and is useful for side effects.
//
// Parameters:
// - ma: The ReaderIO to tap
// - f: Function that produces a Reader for side effects
//
// Returns a ReaderIO with the original value after executing the Reader.
//
//go:inline
func MonadTapReaderK[A, B any](ma ReaderIO[A], f reader.Kleisli[context.Context, A, B]) ReaderIO[A] {
return RIO.MonadTapReaderK(ma, f)
}
// ChainFirstReaderK chains a function that returns a [Reader] but keeps the original value.
// This is the curried version of [MonadChainFirstReaderK].
//
// Parameters:
// - f: Function that produces a Reader
//
// Returns a function that chains Reader-returning functions while preserving the original value.
//
//go:inline
func ChainFirstReaderK[A, B any](f reader.Kleisli[context.Context, A, B]) Operator[A, A] {
return RIO.ChainFirstReaderK(f)
}
// TapReaderK chains a function that returns a [Reader] but keeps the original value.
// This is the curried version of [MonadTapReaderK], an alias for [ChainFirstReaderK].
//
// Parameters:
// - f: Function that produces a Reader for side effects
//
// Returns a function that taps with Reader-returning functions.
//
//go:inline
func TapReaderK[A, B any](f reader.Kleisli[context.Context, A, B]) Operator[A, A] {
return RIO.TapReaderK(f)
}
// Read executes a [ReaderIO] with a given context, returning the resulting [IO].
// This is useful for providing the context dependency and obtaining an IO action
// that can be executed later.
//
// Parameters:
// - r: The context to provide to the ReaderIO
//
// Returns a function that converts a ReaderIO into an IO by applying the context.
//
//go:inline
func Read[A any](r context.Context) func(ReaderIO[A]) IO[A] {
return RIO.Read[A](r)
}
// Local transforms the context.Context environment before passing it to a ReaderIO computation.
//
// This is the Reader's local operation, which allows you to modify the environment
// for a specific computation without affecting the outer context. The transformation
// function receives the current context and returns a new context along with a
// cancel function. The cancel function is automatically called when the computation
// completes (via defer), ensuring proper cleanup of resources.
//
// This is useful for:
// - Adding timeouts or deadlines to specific operations
// - Adding context values for nested computations
// - Creating isolated context scopes
// - Implementing context-based dependency injection
//
// Type Parameters:
// - A: The value type of the ReaderIO
//
// Parameters:
// - f: A function that transforms the context and returns a cancel function
//
// Returns:
// - An Operator that runs the computation with the transformed context
//
// Example:
//
// import F "github.com/IBM/fp-go/v2/function"
//
// // Add a custom value to the context
// type key int
// const userKey key = 0
//
// addUser := readerio.Local[string](func(ctx context.Context) (context.Context, context.CancelFunc) {
// newCtx := context.WithValue(ctx, userKey, "Alice")
// return newCtx, func() {} // No-op cancel
// })
//
// getUser := readerio.FromReader(func(ctx context.Context) string {
// if user := ctx.Value(userKey); user != nil {
// return user.(string)
// }
// return "unknown"
// })
//
// result := F.Pipe1(
// getUser,
// addUser,
// )
// user := result(context.Background())() // Returns "Alice"
//
// Timeout Example:
//
// // Add a 5-second timeout to a specific operation
// withTimeout := readerio.Local[Data](func(ctx context.Context) (context.Context, context.CancelFunc) {
// return context.WithTimeout(ctx, 5*time.Second)
// })
//
// result := F.Pipe1(
// fetchData,
// withTimeout,
// )
func Local[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
return func(rr ReaderIO[A]) ReaderIO[A] {
return func(ctx context.Context) IO[A] {
return func() A {
otherCtx, otherCancel := f(ctx)
defer otherCancel()
return rr(otherCtx)()
}
}
}
}
// WithTimeout adds a timeout to the context for a ReaderIO computation.
//
// This is a convenience wrapper around Local that uses context.WithTimeout.
// The computation must complete within the specified duration, or it will be
// cancelled. This is useful for ensuring operations don't run indefinitely
// and for implementing timeout-based error handling.
//
// The timeout is relative to when the ReaderIO is executed, not when
// WithTimeout is called. The cancel function is automatically called when
// the computation completes, ensuring proper cleanup.
//
// Type Parameters:
// - A: The value type of the ReaderIO
//
// Parameters:
// - timeout: The maximum duration for the computation
//
// Returns:
// - An Operator that runs the computation with a timeout
//
// Example:
//
// import (
// "time"
// F "github.com/IBM/fp-go/v2/function"
// )
//
// // Fetch data with a 5-second timeout
// fetchData := readerio.FromReader(func(ctx context.Context) Data {
// // Simulate slow operation
// select {
// case <-time.After(10 * time.Second):
// return Data{Value: "slow"}
// case <-ctx.Done():
// return Data{}
// }
// })
//
// result := F.Pipe1(
// fetchData,
// readerio.WithTimeout[Data](5*time.Second),
// )
// data := result(context.Background())() // Returns Data{} after 5s timeout
//
// Successful Example:
//
// quickFetch := readerio.Of(Data{Value: "quick"})
// result := F.Pipe1(
// quickFetch,
// readerio.WithTimeout[Data](5*time.Second),
// )
// data := result(context.Background())() // Returns Data{Value: "quick"}
func WithTimeout[A any](timeout time.Duration) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(ctx, timeout)
})
}
// WithDeadline adds an absolute deadline to the context for a ReaderIO computation.
//
// This is a convenience wrapper around Local that uses context.WithDeadline.
// The computation must complete before the specified time, or it will be
// cancelled. This is useful for coordinating operations that must finish
// by a specific time, such as request deadlines or scheduled tasks.
//
// The deadline is an absolute time, unlike WithTimeout which uses a relative
// duration. The cancel function is automatically called when the computation
// completes, ensuring proper cleanup.
//
// Type Parameters:
// - A: The value type of the ReaderIO
//
// Parameters:
// - deadline: The absolute time by which the computation must complete
//
// Returns:
// - An Operator that runs the computation with a deadline
//
// Example:
//
// import (
// "time"
// F "github.com/IBM/fp-go/v2/function"
// )
//
// // Operation must complete by 3 PM
// deadline := time.Date(2024, 1, 1, 15, 0, 0, 0, time.UTC)
//
// fetchData := readerio.FromReader(func(ctx context.Context) Data {
// // Simulate operation
// select {
// case <-time.After(1 * time.Hour):
// return Data{Value: "done"}
// case <-ctx.Done():
// return Data{}
// }
// })
//
// result := F.Pipe1(
// fetchData,
// readerio.WithDeadline[Data](deadline),
// )
// data := result(context.Background())() // Returns Data{} if past deadline
//
// Combining with Parent Context:
//
// // If parent context already has a deadline, the earlier one takes precedence
// parentCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(1*time.Hour))
// defer cancel()
//
// laterDeadline := time.Now().Add(2 * time.Hour)
// result := F.Pipe1(
// fetchData,
// readerio.WithDeadline[Data](laterDeadline),
// )
// data := result(parentCtx)() // Will use parent's 1-hour deadline
func WithDeadline[A any](deadline time.Time) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithDeadline(ctx, deadline)
})
}
// Delay creates an operation that passes the value through after the given delay
//
//go:inline
func Delay[A any](delay time.Duration) Operator[A, A] {
return RIO.Delay[context.Context, A](delay)
}
// After creates an operation that passes the value through once the given [time.Time] has been reached
//
//go:inline
func After[A any](timestamp time.Time) Operator[A, A] {
return RIO.After[context.Context, A](timestamp)
}
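// A minimal usage sketch for [Delay] (F is assumed to be an alias for
// github.com/IBM/fp-go/v2/function; the concrete duration is illustrative):
//
//	delayed := F.Pipe1(
//		Of("ready"),
//		Delay[string](100*time.Millisecond),
//	)
//	value := delayed(context.Background())() // "ready", after roughly 100ms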

View File

@@ -0,0 +1,502 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"context"
"testing"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/utils"
G "github.com/IBM/fp-go/v2/io"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/reader"
"github.com/stretchr/testify/assert"
)
func TestMonadMap(t *testing.T) {
rio := Of(5)
doubled := MonadMap(rio, N.Mul(2))
result := doubled(context.Background())()
assert.Equal(t, 10, result)
}
func TestMap(t *testing.T) {
g := F.Pipe1(
Of(1),
Map(utils.Double),
)
assert.Equal(t, 2, g(context.Background())())
}
func TestMonadMapTo(t *testing.T) {
rio := Of(42)
replaced := MonadMapTo(rio, "constant")
result := replaced(context.Background())()
assert.Equal(t, "constant", result)
}
func TestMapTo(t *testing.T) {
result := F.Pipe1(
Of(42),
MapTo[int]("constant"),
)
assert.Equal(t, "constant", result(context.Background())())
}
func TestMonadChain(t *testing.T) {
rio1 := Of(5)
result := MonadChain(rio1, func(n int) ReaderIO[int] {
return Of(n * 3)
})
assert.Equal(t, 15, result(context.Background())())
}
func TestChain(t *testing.T) {
result := F.Pipe1(
Of(5),
Chain(func(n int) ReaderIO[int] {
return Of(n * 3)
}),
)
assert.Equal(t, 15, result(context.Background())())
}
func TestMonadChainFirst(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadChainFirst(rio, func(n int) ReaderIO[string] {
sideEffect = n
return Of("side effect")
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestChainFirst(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
ChainFirst(func(n int) ReaderIO[string] {
sideEffect = n
return Of("side effect")
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestMonadTap(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadTap(rio, func(n int) ReaderIO[func()] {
sideEffect = n
return Of(func() {})
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestTap(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
Tap(func(n int) ReaderIO[func()] {
sideEffect = n
return Of(func() {})
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestOf(t *testing.T) {
rio := Of(100)
result := rio(context.Background())()
assert.Equal(t, 100, result)
}
func TestMonadAp(t *testing.T) {
fabIO := Of(N.Mul(2))
faIO := Of(5)
result := MonadAp(fabIO, faIO)
assert.Equal(t, 10, result(context.Background())())
}
func TestAp(t *testing.T) {
g := F.Pipe1(
Of(utils.Double),
Ap[int](Of(1)),
)
assert.Equal(t, 2, g(context.Background())())
}
func TestMonadApSeq(t *testing.T) {
fabIO := Of(N.Add(10))
faIO := Of(5)
result := MonadApSeq(fabIO, faIO)
assert.Equal(t, 15, result(context.Background())())
}
func TestApSeq(t *testing.T) {
g := F.Pipe1(
Of(N.Add(10)),
ApSeq[int](Of(5)),
)
assert.Equal(t, 15, g(context.Background())())
}
func TestMonadApPar(t *testing.T) {
fabIO := Of(N.Add(10))
faIO := Of(5)
result := MonadApPar(fabIO, faIO)
assert.Equal(t, 15, result(context.Background())())
}
func TestApPar(t *testing.T) {
g := F.Pipe1(
Of(N.Add(10)),
ApPar[int](Of(5)),
)
assert.Equal(t, 15, g(context.Background())())
}
func TestAsk(t *testing.T) {
type ctxKey struct{}
rio := Ask()
ctx := context.WithValue(context.Background(), ctxKey{}, "value")
result := rio(ctx)()
assert.Equal(t, ctx, result)
}
func TestFromIO(t *testing.T) {
ioAction := G.Of(42)
rio := FromIO(ioAction)
result := rio(context.Background())()
assert.Equal(t, 42, result)
}
func TestFromReader(t *testing.T) {
rdr := func(ctx context.Context) int {
return 42
}
rio := FromReader(rdr)
result := rio(context.Background())()
assert.Equal(t, 42, result)
}
func TestFromLazy(t *testing.T) {
lazy := func() int { return 42 }
rio := FromLazy(lazy)
result := rio(context.Background())()
assert.Equal(t, 42, result)
}
func TestMonadChainIOK(t *testing.T) {
rio := Of(5)
result := MonadChainIOK(rio, func(n int) G.IO[int] {
return G.Of(n * 4)
})
assert.Equal(t, 20, result(context.Background())())
}
func TestChainIOK(t *testing.T) {
result := F.Pipe1(
Of(5),
ChainIOK(func(n int) G.IO[int] {
return G.Of(n * 4)
}),
)
assert.Equal(t, 20, result(context.Background())())
}
func TestMonadChainFirstIOK(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadChainFirstIOK(rio, func(n int) G.IO[string] {
sideEffect = n
return G.Of("side effect")
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestChainFirstIOK(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
ChainFirstIOK(func(n int) G.IO[string] {
sideEffect = n
return G.Of("side effect")
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestMonadTapIOK(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadTapIOK(rio, func(n int) G.IO[func()] {
sideEffect = n
return G.Of(func() {})
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestTapIOK(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
TapIOK(func(n int) G.IO[func()] {
sideEffect = n
return G.Of(func() {})
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestDefer(t *testing.T) {
counter := 0
rio := Defer(func() ReaderIO[int] {
counter++
return Of(counter)
})
result1 := rio(context.Background())()
result2 := rio(context.Background())()
assert.Equal(t, 1, result1)
assert.Equal(t, 2, result2)
}
func TestMemoize(t *testing.T) {
counter := 0
rio := Of(0)
memoized := Memoize(MonadMap(rio, func(int) int {
counter++
return counter
}))
result1 := memoized(context.Background())()
result2 := memoized(context.Background())()
assert.Equal(t, 1, result1)
assert.Equal(t, 1, result2) // Same value, memoized
}
func TestFlatten(t *testing.T) {
nested := Of(Of(42))
flattened := Flatten(nested)
result := flattened(context.Background())()
assert.Equal(t, 42, result)
}
func TestMonadFlap(t *testing.T) {
fabIO := Of(N.Mul(3))
result := MonadFlap(fabIO, 7)
assert.Equal(t, 21, result(context.Background())())
}
func TestFlap(t *testing.T) {
result := F.Pipe1(
Of(N.Mul(3)),
Flap[int](7),
)
assert.Equal(t, 21, result(context.Background())())
}
func TestMonadChainReaderK(t *testing.T) {
rio := Of(5)
result := MonadChainReaderK(rio, func(n int) reader.Reader[context.Context, int] {
return func(ctx context.Context) int { return n * 2 }
})
assert.Equal(t, 10, result(context.Background())())
}
func TestChainReaderK(t *testing.T) {
result := F.Pipe1(
Of(5),
ChainReaderK(func(n int) reader.Reader[context.Context, int] {
return func(ctx context.Context) int { return n * 2 }
}),
)
assert.Equal(t, 10, result(context.Background())())
}
func TestMonadChainFirstReaderK(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadChainFirstReaderK(rio, func(n int) reader.Reader[context.Context, string] {
return func(ctx context.Context) string {
sideEffect = n
return "side effect"
}
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestChainFirstReaderK(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
ChainFirstReaderK(func(n int) reader.Reader[context.Context, string] {
return func(ctx context.Context) string {
sideEffect = n
return "side effect"
}
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestMonadTapReaderK(t *testing.T) {
sideEffect := 0
rio := Of(42)
result := MonadTapReaderK(rio, func(n int) reader.Reader[context.Context, func()] {
return func(ctx context.Context) func() {
sideEffect = n
return func() {}
}
})
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestTapReaderK(t *testing.T) {
sideEffect := 0
result := F.Pipe1(
Of(42),
TapReaderK(func(n int) reader.Reader[context.Context, func()] {
return func(ctx context.Context) func() {
sideEffect = n
return func() {}
}
}),
)
value := result(context.Background())()
assert.Equal(t, 42, value)
assert.Equal(t, 42, sideEffect)
}
func TestRead(t *testing.T) {
rio := Of(42)
ctx := context.Background()
ioAction := Read[int](ctx)(rio)
result := ioAction()
assert.Equal(t, 42, result)
}
func TestComplexPipeline(t *testing.T) {
// Test a complex pipeline combining multiple operations
result := F.Pipe3(
Ask(),
Map(func(ctx context.Context) int { return 5 }),
Chain(func(n int) ReaderIO[int] {
return Of(n * 2)
}),
Map(N.Add(10)),
)
assert.Equal(t, 20, result(context.Background())()) // (5 * 2) + 10 = 20
}
func TestFromIOWithChain(t *testing.T) {
ioAction := G.Of(10)
result := F.Pipe1(
FromIO(ioAction),
Chain(func(n int) ReaderIO[int] {
return Of(n + 5)
}),
)
assert.Equal(t, 15, result(context.Background())())
}
func TestTapWithLogging(t *testing.T) {
// Simulate logging scenario
logged := []int{}
result := F.Pipe3(
Of(42),
Tap(func(n int) ReaderIO[func()] {
logged = append(logged, n)
return Of(func() {})
}),
Map(N.Mul(2)),
Tap(func(n int) ReaderIO[func()] {
logged = append(logged, n)
return Of(func() {})
}),
)
value := result(context.Background())()
assert.Equal(t, 84, value)
assert.Equal(t, []int{42, 84}, logged)
}

View File

@@ -0,0 +1,25 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"github.com/IBM/fp-go/v2/readerio"
)
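// TailRec performs stack-safe recursion for [ReaderIO]-based Kleisli arrows.
// The step function f maps a value of type A to a ReaderIO producing a
// [Trampoline], which either requests another iteration with a new A or lands
// with a final result of type B. The steps are evaluated in a loop rather than
// by recursive calls, so deeply recursive computations do not grow the stack.
//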
//go:inline
func TailRec[A, B any](f Kleisli[A, Trampoline[A, B]]) Kleisli[A, B] {
return readerio.TailRec(f)
}

View File

@@ -0,0 +1,41 @@
// Copyright (c) 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"github.com/IBM/fp-go/v2/retry"
RG "github.com/IBM/fp-go/v2/retry/generic"
)
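// Retrying repeatedly executes the given action according to the supplied
// [retry.RetryPolicy]. The action receives the current [retry.RetryStatus]
// (attempt number, accumulated delay, ...) and produces a [ReaderIO]; the check
// predicate inspects each result to decide whether another attempt is needed.
// Sequencing and the pause between attempts are wired up via [Chain], [Of] and
// [Delay] of this package.
//
// A minimal, illustrative sketch. The policy constructor retry.LimitRetries is
// an assumption about the retry package, and the predicate is assumed to return
// true while the result should still be retried:
//
//	policy := retry.LimitRetries(3)
//	action := func(status retry.RetryStatus) ReaderIO[int] {
//		return Of(int(status.IterNumber))
//	}
//	needsRetry := func(n int) bool { return false } // accept the first result
//	value := Retrying(policy, action, needsRetry)(context.Background())()
//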
//go:inline
func Retrying[A any](
policy retry.RetryPolicy,
action Kleisli[retry.RetryStatus, A],
check func(A) bool,
) ReaderIO[A] {
// get an implementation for the types
return RG.Retrying(
Chain[A, A],
Chain[retry.RetryStatus, A],
Of[A],
Of[retry.RetryStatus],
Delay[retry.RetryStatus],
policy,
action,
check,
)
}

View File

@@ -0,0 +1,78 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"context"
"github.com/IBM/fp-go/v2/consumer"
"github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/lazy"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readerio"
"github.com/IBM/fp-go/v2/tailrec"
)
type (
// Lazy represents a deferred computation that produces a value of type A when executed.
// The computation is not executed until explicitly invoked.
Lazy[A any] = lazy.Lazy[A]
// IO represents a side-effectful computation that produces a value of type A.
// The computation is deferred and only executed when invoked.
//
// IO[A] is equivalent to func() A
IO[A any] = io.IO[A]
// Reader represents a computation that depends on a context of type R.
// This is used for dependency injection and accessing shared context.
//
// Reader[R, A] is equivalent to func(R) A
Reader[R, A any] = reader.Reader[R, A]
// ReaderIO represents a context-dependent computation that performs side effects.
// This is specialized to use [context.Context] as the context type.
//
// ReaderIO[A] is equivalent to func(context.Context) func() A
ReaderIO[A any] = readerio.ReaderIO[context.Context, A]
// Kleisli represents a Kleisli arrow for the ReaderIO monad.
// It is a function that takes a value of type A and returns a ReaderIO computation
// that produces a value of type B.
//
// Kleisli arrows are used for composing monadic computations and are fundamental
// to functional programming patterns involving effects and context.
//
// Kleisli[A, B] is equivalent to func(A) func(context.Context) func() B
Kleisli[A, B any] = reader.Reader[A, ReaderIO[B]]
// Operator represents a transformation from one ReaderIO computation to another.
// It takes a ReaderIO[A] and returns a ReaderIO[B], allowing for the composition
// of context-dependent, side-effectful computations.
//
// Operators are useful for building pipelines of ReaderIO computations where
// each step can depend on the previous computation's result.
//
// Operator[A, B] is equivalent to func(ReaderIO[A]) func(context.Context) func() B
Operator[A, B any] = Kleisli[ReaderIO[A], B]
Consumer[A any] = consumer.Consumer[A]
Either[E, A any] = either.Either[E, A]
Trampoline[B, L any] = tailrec.Trampoline[B, L]
)
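// Illustrative sketch of the aliases above: a ReaderIO is simply a function of
// the context that returns a deferred computation, matching the documented shape
// func(context.Context) func() A.
//
//	var now ReaderIO[int64] = func(ctx context.Context) IO[int64] {
//		return func() int64 { return time.Now().UnixNano() }
//	}
//	_ = now(context.Background())() // runs the effect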

View File

@@ -0,0 +1,682 @@
# Sequence Functions and Point-Free Style Programming
This document explains how the `Sequence*` functions in the `context/readerioresult` package enable point-free style programming and improve code composition.
## Table of Contents
1. [What is Point-Free Style?](#what-is-point-free-style)
2. [The Problem: Nested Function Application](#the-problem-nested-function-application)
3. [The Solution: Sequence Functions](#the-solution-sequence-functions)
4. [How Sequence Enables Point-Free Style](#how-sequence-enables-point-free-style)
5. [TraverseReader: Introducing Dependencies](#traversereader-introducing-dependencies)
6. [Practical Benefits](#practical-benefits)
7. [Examples](#examples)
8. [Comparison: With and Without Sequence](#comparison-with-and-without-sequence)
## What is Point-Free Style?
Point-free style (also called tacit programming) is a programming paradigm where function definitions don't explicitly mention their arguments. Instead, functions are composed using combinators and higher-order functions.
**Traditional style (with points):**
```go
func double(x int) int {
return x * 2
}
```
**Point-free style (without points):**
```go
var double = N.Mul(2)
```
The key benefit is that point-free style emphasizes **what** the function does (its transformation) rather than **how** it manipulates data.
## The Problem: Nested Function Application
In functional programming with monadic types like `ReaderIOResult`, we often have nested structures where we need to apply parameters in a specific order. Consider:
```go
type ReaderIOResult[A any] = func(context.Context) func() Either[error, A]
type Reader[R, A any] = func(R) A
// A computation that produces a Reader
type Computation = ReaderIOResult[Reader[Config, int]]
// Expands to: func(context.Context) func() Either[error, func(Config) int]
```
To use this, we must apply parameters in this order:
1. First, provide `context.Context`
2. Then, execute the IO effect (call the function)
3. Then, unwrap the `Either` to get the `Reader`
4. Finally, provide the `Config`
This creates several problems:
### Problem 1: Awkward Parameter Order
```go
computation := getComputation()
ctx := context.Background()
cfg := Config{Value: 42}
// Must apply in this specific order
result := computation(ctx)() // Get Either[error, Reader[Config, int]]
if reader, err := either.Unwrap(result); err == nil {
value := reader(cfg) // Finally apply Config
// use value
}
```
The `Config` parameter, which is often known early and stable, must be provided last. This prevents partial application and reuse.
### Problem 2: Cannot Partially Apply Dependencies
```go
// Want to do this: create a reusable computation with Config baked in
// But can't because Config comes last!
withConfig := computation(cfg) // ❌ Doesn't work - cfg comes last, not first
```
### Problem 3: Breaks Point-Free Composition
```go
// Want to compose like this:
var pipeline = F.Flow3(
getComputation,
applyConfig(cfg), // ❌ Can't do this - Config comes last
processResult,
)
```
## The Solution: Sequence Functions
The `Sequence*` functions solve this by "flipping" or "sequencing" the nested structure, changing the order in which parameters are applied.
### SequenceReader
```go
func SequenceReader[R, A any](
ma ReaderIOResult[Reader[R, A]]
) Kleisli[R, A]
```
**Type transformation:**
```
From: func(context.Context) func() Either[error, func(R) A]
To: func(R) func(context.Context) func() Either[error, A]
```
Now `R` (the Reader's environment) comes **first**, before `context.Context`!
### SequenceReaderIO
```go
func SequenceReaderIO[R, A any](
ma ReaderIOResult[ReaderIO[R, A]]
) Kleisli[R, A]
```
**Type transformation:**
```
From: func(context.Context) func() Either[error, func(R) func() A]
To: func(R) func(context.Context) func() Either[error, A]
```
### SequenceReaderResult
```go
func SequenceReaderResult[R, A any](
ma ReaderIOResult[ReaderResult[R, A]]
) Kleisli[R, A]
```
**Type transformation:**
```
From: func(context.Context) func() Either[error, func(R) Either[error, A]]
To: func(R) func(context.Context) func() Either[error, A]
```
## How Sequence Enables Point-Free Style
### 1. Partial Application
By moving the environment parameter first, we can partially apply it:
```go
type Config struct { Multiplier int }
computation := getComputation() // ReaderIOResult[Reader[Config, int]]
sequenced := SequenceReader[Config, int](computation)
// Partially apply Config
cfg := Config{Multiplier: 5}
withConfig := sequenced(cfg) // ✅ Now we have ReaderIOResult[int]
// Reuse with different contexts
result1 := withConfig(ctx1)()
result2 := withConfig(ctx2)()
```
### 2. Dependency Injection
Inject dependencies early in the pipeline:
```go
type Database struct { ConnectionString string }
makeQuery := func(ctx context.Context) func() Either[error, func(Database) string] {
// ... implementation
}
// Sequence to enable DI
queryWithDB := SequenceReader[Database, string](makeQuery)
// Inject database
db := Database{ConnectionString: "localhost:5432"}
query := queryWithDB(db) // ✅ Database injected
// Use query with any context
result := query(context.Background())()
```
### 3. Point-Free Composition
Build pipelines without mentioning intermediate values:
```go
var pipeline = F.Flow3(
getComputation, // ReaderIOResult[Reader[Config, int]]
SequenceReader[Config, int], // func(Config) ReaderIOResult[int]
applyConfig(cfg), // ReaderIOResult[int]
)
// Or with partial application:
var withConfig = F.Pipe1(
getComputation(),
SequenceReader[Config, int],
)
result := withConfig(cfg)(ctx)()
```
### 4. Reusable Computations
Create specialized versions of generic computations:
```go
// Generic computation
makeServiceInfo := func(ctx context.Context) func() Either[error, func(ServiceConfig) string] {
// ... implementation
}
sequenced := SequenceReader[ServiceConfig, string](makeServiceInfo)
// Create specialized versions
authService := sequenced(ServiceConfig{Name: "Auth", Version: "1.0"})
userService := sequenced(ServiceConfig{Name: "User", Version: "2.0"})
// Reuse across contexts
authInfo := authService(ctx)()
userInfo := userService(ctx)()
```
## TraverseReader: Introducing Dependencies
While `SequenceReader` flips the parameter order of an existing nested structure, `TraverseReader` allows you to **introduce** a new Reader dependency into an existing computation.
### Function Signature
```go
func TraverseReader[R, A, B any](
f reader.Kleisli[R, A, B],
) func(ReaderIOResult[A]) Kleisli[R, B]
```
**Type transformation:**
```
Input: ReaderIOResult[A] = func(context.Context) func() Either[error, A]
With: reader.Kleisli[R, A, B] = func(A) func(R) B
Output: Kleisli[R, B] = func(R) func(context.Context) func() Either[error, B]
```
### What It Does
`TraverseReader` takes:
1. A Reader-based transformation `f: func(A) func(R) B` that depends on environment `R`
2. Returns a function that transforms `ReaderIOResult[A]` into `Kleisli[R, B]`
This allows you to:
- Add environment dependencies to computations that don't have them yet
- Transform values within a ReaderIOResult using environment-dependent logic
- Build composable pipelines where transformations depend on configuration
### Key Difference from SequenceReader
- **SequenceReader**: Works with computations that **already contain** a Reader (`ReaderIOResult[Reader[R, A]]`)
- Flips the order so `R` comes first
- No transformation of the value itself
- **TraverseReader**: Works with computations that **don't have** a Reader yet (`ReaderIOResult[A]`)
- Introduces a new Reader dependency via a transformation function
- Transforms `A` to `B` using environment `R`
### Example: Adding Configuration to a Computation
```go
type Config struct {
Multiplier int
Prefix string
}
// Original computation that just produces an int
getValue := func(ctx context.Context) func() Either[error, int] {
return func() Either[error, int] {
return Right[error](10)
}
}
// A Reader-based transformation that depends on Config
formatWithConfig := func(n int) func(Config) string {
return func(cfg Config) string {
result := n * cfg.Multiplier
return fmt.Sprintf("%s: %d", cfg.Prefix, result)
}
}
// Use TraverseReader to introduce Config dependency
traversed := TraverseReader[Config, int, string](formatWithConfig)
withConfig := traversed(getValue)
// Now we can provide Config to get the final result
cfg := Config{Multiplier: 5, Prefix: "Result"}
ctx := context.Background()
result := withConfig(cfg)(ctx)() // Returns Right("Result: 50")
```
### Point-Free Composition with TraverseReader
```go
// Build a pipeline that introduces dependencies at each stage
var pipeline = F.Flow4(
loadValue, // ReaderIOResult[int]
TraverseReader(multiplyByConfig), // Kleisli[Config, int]
applyConfig(cfg), // ReaderIOResult[int]
Chain(TraverseReader(formatWithStyle)), // Introduce another dependency
)
```
### When to Use TraverseReader vs SequenceReader
**Use SequenceReader when:**
- Your computation already returns a Reader: `ReaderIOResult[Reader[R, A]]`
- You just want to flip the parameter order
- No transformation of the value is needed
```go
// Already have Reader[Config, int]
computation := getComputation() // ReaderIOResult[Reader[Config, int]]
sequenced := SequenceReader[Config, int](computation)
result := sequenced(cfg)(ctx)()
```
**Use TraverseReader when:**
- Your computation doesn't have a Reader yet: `ReaderIOResult[A]`
- You want to transform the value using environment-dependent logic
- You're introducing a new dependency into the pipeline
```go
// Have ReaderIOResult[int], want to add Config dependency
computation := getValue() // ReaderIOResult[int]
traversed := TraverseReader[Config, int, string](formatWithConfig)
withDep := traversed(computation)
result := withDep(cfg)(ctx)()
```
### Practical Example: Multi-Stage Processing
```go
type DatabaseConfig struct {
ConnectionString string
Timeout time.Duration
}
type FormattingConfig struct {
DateFormat string
Timezone string
}
// Stage 1: Load raw data (no dependencies yet)
loadData := func(ctx context.Context) func() Either[error, RawData] {
// ... implementation
}
// Stage 2: Process with database config
processWithDB := func(raw RawData) func(DatabaseConfig) ProcessedData {
return func(cfg DatabaseConfig) ProcessedData {
// Use cfg.ConnectionString, cfg.Timeout
return ProcessedData{/* ... */}
}
}
// Stage 3: Format with formatting config
formatData := func(processed ProcessedData) func(FormattingConfig) string {
return func(cfg FormattingConfig) string {
// Use cfg.DateFormat, cfg.Timezone
return "formatted result"
}
}
// Build pipeline introducing dependencies at each stage
var pipeline = F.Pipe3(
loadData,
TraverseReader[DatabaseConfig, RawData, ProcessedData](processWithDB),
// Now we have Kleisli[DatabaseConfig, ProcessedData]
applyConfig(dbConfig),
// Now we have ReaderIOResult[ProcessedData]
TraverseReader[FormattingConfig, ProcessedData, string](formatData),
// Now we have Kleisli[FormattingConfig, string]
)
// Execute with both configs
result := pipeline(fmtConfig)(ctx)()
```
### Combining TraverseReader and SequenceReader
You can combine both functions in complex pipelines:
```go
// Start with nested Reader
computation := getComputation() // ReaderIOResult[Reader[Config, User]]
var pipeline = F.Flow4(
computation,
SequenceReader[Config, User], // Flip to get Kleisli[Config, User]
applyConfig(cfg), // Apply config, get ReaderIOResult[User]
TraverseReader(enrichWithDatabase), // Add database dependency
// Now have Kleisli[Database, EnrichedUser]
)
result := pipeline(db)(ctx)()
```
## Practical Benefits
### 1. **Improved Testability**
Inject test dependencies easily:
```go
// Production
prodDB := Database{ConnectionString: "prod:5432"}
prodQuery := queryWithDB(prodDB)
// Testing
testDB := Database{ConnectionString: "test:5432"}
testQuery := queryWithDB(testDB)
// Same computation, different dependencies
```
### 2. **Better Separation of Concerns**
Separate configuration from execution:
```go
// Configuration phase (pure, no effects)
cfg := loadConfig()
computation := sequenced(cfg)
// Execution phase (with effects)
result := computation(ctx)()
```
### 3. **Enhanced Composability**
Build complex pipelines from simple pieces:
```go
var processUser = F.Flow4(
loadUserConfig, // ReaderIOResult[Reader[Database, User]]
SequenceReader, // func(Database) ReaderIOResult[User]
applyDatabase(db), // ReaderIOResult[User]
Chain(validateUser), // ReaderIOResult[ValidatedUser]
)
```
### 4. **Reduced Boilerplate**
No need to manually thread parameters:
```go
// Without Sequence - manual threading
func processWithConfig(cfg Config) ReaderIOResult[Result] {
return func(ctx context.Context) func() Either[error, Result] {
return func() Either[error, Result] {
comp := getComputation()(ctx)()
if reader, err := either.Unwrap(comp); err == nil {
value := reader(cfg)
// ... more processing
}
// ... error handling
}
}
}
// With Sequence - point-free
var processWithConfig = F.Flow2(
getComputation,
SequenceReader[Config, Result],
)
```
## Examples
### Example 1: Database Query with Configuration
```go
type QueryConfig struct {
Timeout time.Duration
MaxRows int
}
type Database struct {
ConnectionString string
}
// Without Sequence
func executeQueryOld(cfg QueryConfig, db Database) ReaderIOResult[[]Row] {
return func(ctx context.Context) func() Either[error, []Row] {
return func() Either[error, []Row] {
// Must manually handle all parameters
// ...
}
}
}
// With Sequence
func makeQuery(ctx context.Context) func() Either[error, func(Database) []Row] {
return func() Either[error, func(Database) []Row] {
return Right[error](func(db Database) []Row {
// Implementation
return []Row{}
})
}
}
var executeQuery = F.Flow2(
makeQuery,
SequenceReader[Database, []Row],
)
// Usage
db := Database{ConnectionString: "localhost:5432"}
query := executeQuery(db)
result := query(ctx)()
```
### Example 2: Multi-Service Architecture
```go
type ServiceRegistry struct {
AuthService AuthService
UserService UserService
EmailService EmailService
}
// Create computations that depend on services
makeAuthCheck := func(ctx context.Context) func() Either[error, func(ServiceRegistry) bool] {
// ... implementation
}
makeSendEmail := func(ctx context.Context) func() Either[error, func(ServiceRegistry) error] {
// ... implementation
}
// Sequence them
authCheck := SequenceReader[ServiceRegistry, bool](makeAuthCheck)
sendEmail := SequenceReader[ServiceRegistry, error](makeSendEmail)
// Inject services once
registry := ServiceRegistry{ /* ... */ }
checkAuth := authCheck(registry)
sendMail := sendEmail(registry)
// Use with different contexts
if isAuth, _ := either.Unwrap(checkAuth(ctx1)()); isAuth {
sendMail(ctx2)()
}
```
### Example 3: Configuration-Driven Pipeline
```go
type PipelineConfig struct {
Stage1Config Stage1Config
Stage2Config Stage2Config
Stage3Config Stage3Config
}
// Define stages
stage1 := SequenceReader[Stage1Config, IntermediateResult1](makeStage1)
stage2 := SequenceReader[Stage2Config, IntermediateResult2](makeStage2)
stage3 := SequenceReader[Stage3Config, FinalResult](makeStage3)
// Build pipeline with configuration
func buildPipeline(cfg PipelineConfig) ReaderIOResult[FinalResult] {
return F.Pipe2(
stage1(cfg.Stage1Config),
Chain(func(r1 IntermediateResult1) ReaderIOResult[IntermediateResult2] {
return stage2(cfg.Stage2Config)
}),
Chain(func(r2 IntermediateResult2) ReaderIOResult[FinalResult] {
return stage3(cfg.Stage3Config)
}),
)
}
// Execute pipeline
cfg := loadPipelineConfig()
pipeline := buildPipeline(cfg)
result := pipeline(ctx)()
```
## Comparison: With and Without Sequence
### Without Sequence (Imperative Style)
```go
func processUser(userID string) ReaderIOResult[ProcessedUser] {
return func(ctx context.Context) func() Either[error, ProcessedUser] {
return func() Either[error, ProcessedUser] {
// Get database
dbComp := getDatabase()(ctx)()
dbReader, err := either.Unwrap(dbComp)
if err != nil {
return Left[ProcessedUser](err)
}
db := dbReader(dbConfig)
// Get user
userComp := getUser(userID)(ctx)()
userReader, err := either.Unwrap(userComp)
if err != nil {
return Left[ProcessedUser](err)
}
user := userReader(db)
// Process user
processComp := processUserData(user)(ctx)()
processReader, err := either.Unwrap(processComp)
if err != nil {
return Left[ProcessedUser](err)
}
result := processReader(processingConfig)
return Right[error](result)
}
}
}
```
### With Sequence (Point-Free Style)
```go
var processUser = func(userID string) ReaderIOResult[ProcessedUser] {
return F.Pipe4(
getDatabase,
SequenceReader[DatabaseConfig, Database],
applyConfig(dbConfig),
Chain(func(db Database) ReaderIOResult[User] {
return F.Pipe2(
getUser(userID),
SequenceReader[Database, User],
applyDB(db),
)
}),
Chain(func(user User) ReaderIOResult[ProcessedUser] {
return F.Pipe2(
processUserData(user),
SequenceReader[ProcessingConfig, ProcessedUser],
applyConfig(processingConfig),
)
}),
)
}
```
## Key Takeaways
1. **Sequence functions flip parameter order** to enable partial application
2. **Dependencies come first**, making them easy to inject and test
3. **Point-free style** becomes natural and readable
4. **Composition** is enhanced through proper parameter ordering
5. **Reusability** increases as computations can be specialized early
6. **Testability** improves through easy dependency injection
7. **Separation of concerns** is clearer (configuration vs. execution)
## When to Use Sequence
Use `Sequence*` functions when:
- ✅ You want to partially apply environment/configuration parameters
- ✅ You're building reusable computations with injected dependencies
- ✅ You need to test with different dependency implementations
- ✅ You're composing complex pipelines in point-free style
- ✅ You want to separate configuration from execution
- ✅ You're working with nested Reader-like structures
Don't use `Sequence*` when:
- ❌ The original parameter order is already optimal
- ❌ You're not doing any composition or partial application
- ❌ The added abstraction doesn't provide value
- ❌ The code is simpler without it
## Conclusion
The `Sequence*` functions are powerful tools for enabling point-free style programming in Go. By flipping the parameter order of nested monadic structures, they make it easy to:
- Partially apply dependencies
- Build composable pipelines
- Improve testability
- Write more declarative code
While they add a layer of abstraction, the benefits in terms of code reusability, testability, and composability make them invaluable for functional programming in Go.

View File

@@ -18,14 +18,13 @@ package readerioresult
import ( import (
"context" "context"
"github.com/IBM/fp-go/v2/context/readerio"
F "github.com/IBM/fp-go/v2/function" F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/apply" "github.com/IBM/fp-go/v2/internal/apply"
"github.com/IBM/fp-go/v2/io" "github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioeither" "github.com/IBM/fp-go/v2/ioeither"
"github.com/IBM/fp-go/v2/ioresult" "github.com/IBM/fp-go/v2/ioresult"
L "github.com/IBM/fp-go/v2/optics/lens"
"github.com/IBM/fp-go/v2/reader" "github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readerio"
RIOR "github.com/IBM/fp-go/v2/readerioresult" RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/result" "github.com/IBM/fp-go/v2/result"
) )
@@ -96,7 +95,7 @@ func Bind[S1, S2, T any](
setter func(T) func(S1) S2, setter func(T) func(S1) S2,
f Kleisli[S1, T], f Kleisli[S1, T],
) Operator[S1, S2] { ) Operator[S1, S2] {
return RIOR.Bind(setter, f) return RIOR.Bind(setter, WithContextK(f))
} }
// Let attaches the result of a computation to a context [S1] to produce a context [S2] // Let attaches the result of a computation to a context [S1] to produce a context [S2]
@@ -128,6 +127,13 @@ func BindTo[S1, T any](
return RIOR.BindTo[context.Context](setter) return RIOR.BindTo[context.Context](setter)
} }
//go:inline
func BindToP[S1, T any](
setter Prism[S1, T],
) Operator[T, S1] {
return BindTo(setter.ReverseGet)
}
// ApS attaches a value to a context [S1] to produce a context [S2] by considering // ApS attaches a value to a context [S1] to produce a context [S2] by considering
 // the context and the value concurrently (using Applicative rather than Monad).
 // This allows independent computations to be combined without one depending on the result of the other.
@@ -214,7 +220,7 @@ func ApS[S1, S2, T any](
 //
 //go:inline
 func ApSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa ReaderIOResult[T],
 ) Operator[S, S] {
 	return ApS(lens.Set, fa)
@@ -253,10 +259,10 @@ func ApSL[S, T any](
 //
 //go:inline
 func BindL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	f Kleisli[T, T],
 ) Operator[S, S] {
-	return RIOR.BindL(lens, f)
+	return RIOR.BindL(lens, WithContextK(f))
 }
 
 // LetL is a variant of Let that uses a lens to focus on a specific part of the context.
@@ -289,8 +295,8 @@ func BindL[S, T any](
 //
 //go:inline
 func LetL[S, T any](
-	lens L.Lens[S, T],
-	f func(T) T,
+	lens Lens[S, T],
+	f Endomorphism[T],
 ) Operator[S, S] {
 	return RIOR.LetL[context.Context](lens, f)
 }
@@ -322,7 +328,7 @@ func LetL[S, T any](
 //
 //go:inline
 func LetToL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	b T,
 ) Operator[S, S] {
 	return RIOR.LetToL[context.Context](lens, b)
@@ -398,7 +404,7 @@ func BindReaderK[S1, S2, T any](
 //go:inline
 func BindReaderIOK[S1, S2, T any](
 	setter func(T) func(S1) S2,
-	f readerio.Kleisli[context.Context, S1, T],
+	f readerio.Kleisli[S1, T],
 ) Operator[S1, S2] {
 	return Bind(setter, F.Flow2(f, FromReaderIO[T]))
 }
@@ -443,7 +449,7 @@ func BindResultK[S1, S2, T any](
 //
 //go:inline
 func BindIOEitherKL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	f ioresult.Kleisli[T, T],
 ) Operator[S, S] {
 	return BindL(lens, F.Flow2(f, FromIOEither[T]))
@@ -458,7 +464,7 @@ func BindIOEitherKL[S, T any](
 //
 //go:inline
 func BindIOResultKL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	f ioresult.Kleisli[T, T],
 ) Operator[S, S] {
 	return BindL(lens, F.Flow2(f, FromIOEither[T]))
@@ -474,7 +480,7 @@ func BindIOResultKL[S, T any](
 //
 //go:inline
 func BindIOKL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	f io.Kleisli[T, T],
 ) Operator[S, S] {
 	return BindL(lens, F.Flow2(f, FromIO[T]))
@@ -490,7 +496,7 @@ func BindIOKL[S, T any](
 //
 //go:inline
 func BindReaderKL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	f reader.Kleisli[context.Context, T, T],
 ) Operator[S, S] {
 	return BindL(lens, F.Flow2(f, FromReader[T]))
@@ -506,8 +512,8 @@ func BindReaderKL[S, T any](
 //
 //go:inline
 func BindReaderIOKL[S, T any](
-	lens L.Lens[S, T],
-	f readerio.Kleisli[context.Context, T, T],
+	lens Lens[S, T],
+	f readerio.Kleisli[T, T],
 ) Operator[S, S] {
 	return BindL(lens, F.Flow2(f, FromReaderIO[T]))
 }
@@ -627,7 +633,7 @@ func ApResultS[S1, S2, T any](
 //
 //go:inline
 func ApIOEitherSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa IOResult[T],
 ) Operator[S, S] {
 	return F.Bind2nd(F.Flow2[ReaderIOResult[S], ioresult.Operator[S, S]], ioresult.ApSL(lens, fa))
@@ -642,7 +648,7 @@ func ApIOEitherSL[S, T any](
 //
 //go:inline
 func ApIOResultSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa IOResult[T],
 ) Operator[S, S] {
 	return F.Bind2nd(F.Flow2[ReaderIOResult[S], ioresult.Operator[S, S]], ioresult.ApSL(lens, fa))
@@ -657,7 +663,7 @@ func ApIOResultSL[S, T any](
 //
 //go:inline
 func ApIOSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa IO[T],
 ) Operator[S, S] {
 	return ApSL(lens, FromIO(fa))
@@ -672,7 +678,7 @@ func ApIOSL[S, T any](
 //
 //go:inline
 func ApReaderSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa Reader[context.Context, T],
 ) Operator[S, S] {
 	return ApSL(lens, FromReader(fa))
@@ -687,7 +693,7 @@ func ApReaderSL[S, T any](
 //
 //go:inline
 func ApReaderIOSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa ReaderIO[T],
 ) Operator[S, S] {
 	return ApSL(lens, FromReaderIO(fa))
@@ -702,7 +708,7 @@ func ApReaderIOSL[S, T any](
 //
 //go:inline
 func ApEitherSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa Result[T],
 ) Operator[S, S] {
 	return ApSL(lens, FromEither(fa))
@@ -717,7 +723,7 @@ func ApEitherSL[S, T any](
 //
 //go:inline
 func ApResultSL[S, T any](
-	lens L.Lens[S, T],
+	lens Lens[S, T],
 	fa Result[T],
 ) Operator[S, S] {
 	return ApSL(lens, FromResult(fa))
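
For readers new to the lens helpers touched above: they are thin wrappers over the package's Do/Bind/ApS notation, focusing each step on one field of the state via a lens. A minimal, hypothetical sketch of that underlying notation (assuming the package exposes the usual Do entry point alongside the Bind and ApS signatures shown in the hunks above; State, fetchUser and fetchCount are made up for illustration):

	package main

	import (
		"context"
		"fmt"

		RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
		F "github.com/IBM/fp-go/v2/function"
	)

	type State struct {
		User  string
		Count int
	}

	func main() {
		fetchUser := RIOE.Of("carol") // ReaderIOResult[string]
		fetchCount := RIOE.Of(42)     // ReaderIOResult[int]

		res := F.Pipe2(
			RIOE.Do(State{}),
			// Bind: sequential step; the Kleisli could inspect the state built so far.
			RIOE.Bind(func(u string) func(State) State {
				return func(s State) State { s.User = u; return s }
			}, func(State) RIOE.ReaderIOResult[string] { return fetchUser }),
			// ApS: applicative step, independent of the accumulated state.
			RIOE.ApS(func(c int) func(State) State {
				return func(s State) State { s.Count = c; return s }
			}, fetchCount),
		)

		// a Right wrapping State{User: "carol", Count: 42}
		fmt.Println(res(context.Background())())
	}

The lens variants (BindL, ApSL, LetL, ...) express the same setters through lens.Set instead of hand-written closures.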

View File

@@ -203,9 +203,7 @@ func TestApS_EmptyState(t *testing.T) {
 	result := res(t.Context())()
 	assert.True(t, E.IsRight(result))
 	emptyOpt := E.ToOption(result)
-	assert.True(t, O.IsSome(emptyOpt))
-	empty, _ := O.Unwrap(emptyOpt)
-	assert.Equal(t, Empty{}, empty)
+	assert.Equal(t, O.Of(Empty{}), emptyOpt)
 }
 
 func TestApS_ChainedWithBind(t *testing.T) {

View File

@@ -16,11 +16,14 @@
 package readerioresult
 
 import (
+	F "github.com/IBM/fp-go/v2/function"
 	RIOR "github.com/IBM/fp-go/v2/readerioresult"
 )
 
 // Bracket makes sure that a resource is cleaned up in the event of an error. The release action is called regardless of
 // whether the body action returns an error or not.
+//
+//go:inline
 func Bracket[
 	A, B, ANY any](
@@ -28,5 +31,5 @@ func Bracket[
 	use Kleisli[A, B],
 	release func(A, Either[B]) ReaderIOResult[ANY],
 ) ReaderIOResult[B] {
-	return RIOR.Bracket(acquire, use, release)
+	return RIOR.Bracket(acquire, F.Flow2(use, WithContext), release)
 }
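
As a rough illustration of the guarantee Bracket provides (a sketch, not code from the repository: the "resource" is just a string, the release step only prints, and Either[B] is taken to be this package's error-carrying Either alias):

	package main

	import (
		"context"
		"fmt"

		RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
	)

	func main() {
		// acquire the (hypothetical) resource
		acquire := RIOE.Of("connection")

		// use the resource; the produced type may differ from the resource type
		use := func(r string) RIOE.ReaderIOResult[int] {
			return RIOE.Of(len(r))
		}

		// release receives the resource and the outcome of use,
		// and runs whether use succeeded or failed
		release := func(r string, _ RIOE.Either[int]) RIOE.ReaderIOResult[string] {
			return RIOE.FromIO(func() string {
				fmt.Println("releasing", r)
				return r
			})
		}

		res := RIOE.Bracket(acquire, use, release)
		fmt.Println(res(context.Background())()) // prints the release message, then a Right wrapping 10
	}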

View File

@@ -19,6 +19,7 @@ import (
 	"context"
 
 	CIOE "github.com/IBM/fp-go/v2/context/ioresult"
+	F "github.com/IBM/fp-go/v2/function"
 	"github.com/IBM/fp-go/v2/ioeither"
 )
@@ -34,9 +35,17 @@ import (
 // Returns a ReaderIOResult that checks for cancellation before executing.
 func WithContext[A any](ma ReaderIOResult[A]) ReaderIOResult[A] {
 	return func(ctx context.Context) IOEither[A] {
-		if err := context.Cause(ctx); err != nil {
-			return ioeither.Left[A](err)
+		if ctx.Err() != nil {
+			return ioeither.Left[A](context.Cause(ctx))
 		}
 		return CIOE.WithContext(ctx, ma(ctx))
 	}
 }
+
+//go:inline
+func WithContextK[A, B any](f Kleisli[A, B]) Kleisli[A, B] {
+	return F.Flow2(
+		f,
+		WithContext,
+	)
+}
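
A small sketch of what this guard buys (using the RIOE alias for this package as elsewhere in this compare): the wrapped computation fails fast with the cancellation cause instead of running.

	package main

	import (
		"context"
		"fmt"

		RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
	)

	func main() {
		guarded := RIOE.WithContext(RIOE.Of(42))

		ctx, cancel := context.WithCancel(context.Background())
		cancel()

		fmt.Println(guarded(ctx)())                  // a Left carrying context.Canceled
		fmt.Println(guarded(context.Background())()) // a Right carrying 42
	}

WithContextK simply post-composes this guard onto a Kleisli arrow, which is how BindL above gains a cancellation check for every step.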

View File

@@ -0,0 +1,13 @@
package readerioresult
import "github.com/IBM/fp-go/v2/io"
//go:inline
func ChainConsumer[A any](c Consumer[A]) Operator[A, struct{}] {
return ChainIOK(io.FromConsumerK(c))
}
//go:inline
func ChainFirstConsumer[A any](c Consumer[A]) Operator[A, A] {
return ChainFirstIOK(io.FromConsumerK(c))
}
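
These two helpers ship without doc comments; as a hedged sketch (assuming Consumer[A] is the usual func(A) alias and that this file lives in the context-aware readerioresult package like its neighbours in this compare), ChainFirstConsumer pushes the success value into a side effect and passes it through unchanged:

	package main

	import (
		"context"
		"fmt"

		RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
		F "github.com/IBM/fp-go/v2/function"
	)

	func main() {
		logInt := func(n int) { fmt.Println("saw", n) } // a Consumer[int]

		res := F.Pipe1(
			RIOE.Of(7),
			RIOE.ChainFirstConsumer(logInt),
		)

		fmt.Println(res(context.Background())()) // prints "saw 7", then a Right wrapping 7
	}

ChainConsumer is the discarding variant: it keeps the effect but replaces the value with struct{}{}.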

View File

@@ -0,0 +1,295 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"context"
"github.com/IBM/fp-go/v2/reader"
RIO "github.com/IBM/fp-go/v2/readerio"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
RR "github.com/IBM/fp-go/v2/readerresult"
)
// SequenceReader transforms a ReaderIOResult containing a Reader into a function that
// takes the Reader's environment first, then returns a ReaderIOResult.
//
// This function "flips" or "sequences" the nested structure, changing the order in which
// parameters are applied. It's particularly useful for point-free style programming where
// you want to partially apply the inner Reader's environment before dealing with the
// outer context.
//
// Type transformation:
//
// From: ReaderIOResult[Reader[R, A]]
// = func(context.Context) func() Either[error, func(R) A]
//
// To: func(context.Context) func(R) IOResult[A]
// = func(context.Context) func(R) func() Either[error, A]
//
// This allows you to:
// 1. Provide the context.Context first
// 2. Then provide the Reader's environment R
// 3. Finally execute the IO effect to get Either[error, A]
//
// Point-free style benefits:
// - Enables partial application of the Reader environment
// - Facilitates composition of Reader-based computations
// - Allows building reusable computation pipelines
// - Supports dependency injection patterns where R represents dependencies
//
// Example:
//
// type Config struct {
// Timeout int
// }
//
// // A computation that produces a Reader based on context
// func getMultiplier(ctx context.Context) func() Either[error, func(Config) int] {
// return func() Either[error, func(Config) int] {
// return Right[error](func(cfg Config) int {
// return cfg.Timeout * 2
// })
// }
// }
//
// // Sequence it to apply Config first
// sequenced := SequenceReader[Config, int](getMultiplier)
//
// // Now we can partially apply the Config
// cfg := Config{Timeout: 30}
// ctx := context.Background()
// result := sequenced(ctx)(cfg)() // Returns Right(60)
//
// This is especially useful in point-free style when building computation pipelines:
//
// var pipeline = F.Flow3(
// loadConfig, // ReaderIOResult[Reader[Database, Config]]
// SequenceReader, // func(context.Context) func(Database) IOResult[Config]
// applyToDatabase(db), // IOResult[Config]
// )
//
//go:inline
func SequenceReader[R, A any](ma ReaderIOResult[Reader[R, A]]) Kleisli[R, A] {
return RIOR.SequenceReader(ma)
}
// SequenceReaderIO transforms a ReaderIOResult containing a ReaderIO into a function that
// takes the ReaderIO's environment first, then returns a ReaderIOResult.
//
// This is similar to SequenceReader but works with ReaderIO, which represents a computation
// that depends on an environment R and performs IO effects.
//
// Type transformation:
//
// From: ReaderIOResult[ReaderIO[R, A]]
// = func(context.Context) func() Either[error, func(R) func() A]
//
// To: func(context.Context) func(R) IOResult[A]
// = func(context.Context) func(R) func() Either[error, A]
//
// The key difference from SequenceReader is that the inner computation (ReaderIO) already
// performs IO effects, so the sequencing combines these effects properly.
//
// Point-free style benefits:
// - Enables composition of ReaderIO-based computations
// - Allows partial application of environment before IO execution
// - Facilitates building effect pipelines with dependency injection
// - Supports layered architecture where R represents service dependencies
//
// Example:
//
// type Database struct {
// ConnectionString string
// }
//
// // A computation that produces a ReaderIO based on context
// func getQuery(ctx context.Context) func() Either[error, func(Database) func() string] {
// return func() Either[error, func(Database) func() string] {
// return Right[error](func(db Database) func() string {
// return func() string {
// // Perform actual IO here
// return "Query result from " + db.ConnectionString
// }
// })
// }
// }
//
// // Sequence it to apply Database first
// sequenced := SequenceReaderIO[Database, string](getQuery)
//
// // Partially apply the Database
// db := Database{ConnectionString: "localhost:5432"}
// ctx := context.Background()
// result := sequenced(ctx)(db)() // Executes IO and returns Right("Query result...")
//
// In point-free style, this enables clean composition:
//
// var executeQuery = F.Flow3(
// prepareQuery, // ReaderIOResult[ReaderIO[Database, QueryResult]]
// SequenceReaderIO, // func(context.Context) func(Database) IOResult[QueryResult]
// withDatabase(db), // IOResult[QueryResult]
// )
//
//go:inline
func SequenceReaderIO[R, A any](ma ReaderIOResult[RIO.ReaderIO[R, A]]) Kleisli[R, A] {
return RIOR.SequenceReaderIO(ma)
}
// SequenceReaderResult transforms a ReaderIOResult containing a ReaderResult into a function
// that takes the ReaderResult's environment first, then returns a ReaderIOResult.
//
// This is similar to SequenceReader but works with ReaderResult, which represents a computation
// that depends on an environment R and can fail with an error.
//
// Type transformation:
//
// From: ReaderIOResult[ReaderResult[R, A]]
// = func(context.Context) func() Either[error, func(R) Either[error, A]]
//
// To: func(context.Context) func(R) IOResult[A]
// = func(context.Context) func(R) func() Either[error, A]
//
// The sequencing properly combines the error handling from both the outer ReaderIOResult
// and the inner ReaderResult, ensuring that errors from either level are propagated correctly.
//
// Point-free style benefits:
// - Enables composition of error-handling computations with dependency injection
// - Allows partial application of dependencies before error handling
// - Facilitates building validation pipelines with environment dependencies
// - Supports service-oriented architectures with proper error propagation
//
// Example:
//
// type Config struct {
// MaxRetries int
// }
//
// // A computation that produces a ReaderResult based on context
// func validateRetries(ctx context.Context) func() Either[error, func(Config) Either[error, int]] {
// return func() Either[error, func(Config) Either[error, int]] {
// return Right[error](func(cfg Config) Either[error, int] {
// if cfg.MaxRetries < 0 {
// return Left[int](errors.New("negative retries"))
// }
// return Right[error](cfg.MaxRetries)
// })
// }
// }
//
// // Sequence it to apply Config first
// sequenced := SequenceReaderResult[Config, int](validateRetries)
//
// // Partially apply the Config
// cfg := Config{MaxRetries: 3}
// ctx := context.Background()
// result := sequenced(ctx)(cfg)() // Returns Right(3)
//
// // With invalid config
// badCfg := Config{MaxRetries: -1}
// badResult := sequenced(ctx)(badCfg)() // Returns Left(error("negative retries"))
//
// In point-free style, this enables validation pipelines:
//
// var validateAndProcess = F.Flow4(
// loadConfig, // ReaderIOResult[ReaderResult[Config, Settings]]
// SequenceReaderResult, // func(context.Context) func(Config) IOResult[Settings]
// applyConfig(cfg), // IOResult[Settings]
// Chain(processSettings), // IOResult[Result]
// )
//
//go:inline
func SequenceReaderResult[R, A any](ma ReaderIOResult[RR.ReaderResult[R, A]]) Kleisli[R, A] {
return RIOR.SequenceReaderEither(ma)
}
// TraverseReader transforms a ReaderIOResult computation by applying a Reader-based function,
// effectively introducing a new environment dependency.
//
// This function takes a Reader-based transformation (Kleisli arrow) and returns a function that
// can transform a ReaderIOResult. The result allows you to provide the Reader's environment (R)
// first, which then produces a ReaderIOResult that depends on the context.
//
// Type transformation:
//
// From: ReaderIOResult[A]
// = func(context.Context) func() Either[error, A]
//
// With: reader.Kleisli[R, A, B]
// = func(A) func(R) B
//
// To: func(ReaderIOResult[A]) func(R) ReaderIOResult[B]
// = func(ReaderIOResult[A]) func(R) func(context.Context) func() Either[error, B]
//
// This enables:
// 1. Transforming values within a ReaderIOResult using environment-dependent logic
// 2. Introducing new environment dependencies into existing computations
// 3. Building composable pipelines where transformations depend on configuration or dependencies
// 4. Point-free style composition with Reader-based transformations
//
// Type Parameters:
// - R: The environment type that the Reader depends on
// - A: The input value type
// - B: The output value type
//
// Parameters:
// - f: A Reader-based Kleisli arrow that transforms A to B using environment R
//
// Returns:
// - A function that takes a ReaderIOResult[A] and returns a Kleisli[R, B],
// which is func(R) ReaderIOResult[B]
//
// The function preserves error handling and IO effects while adding the Reader environment dependency.
//
// Example:
//
// type Config struct {
// Multiplier int
// }
//
// // A Reader-based transformation that depends on Config
// multiply := func(x int) func(Config) int {
// return func(cfg Config) int {
// return x * cfg.Multiplier
// }
// }
//
// // Original computation that produces an int
// computation := Right[int](10)
//
// // Apply TraverseReader to introduce Config dependency
// traversed := TraverseReader[Config, int, int](multiply)
// result := traversed(computation)
//
// // Now we can provide the Config to get the final result
// cfg := Config{Multiplier: 5}
// ctx := context.Background()
// finalResult := result(cfg)(ctx)() // Returns Right(50)
//
// In point-free style, this enables clean composition:
//
// var pipeline = F.Flow3(
// loadValue, // ReaderIOResult[int]
// TraverseReader(multiplyByConfig), // func(Config) ReaderIOResult[int]
// applyConfig(cfg), // ReaderIOResult[int]
// )
//
//go:inline
func TraverseReader[R, A, B any](
f reader.Kleisli[R, A, B],
) func(ReaderIOResult[A]) Kleisli[R, B] {
return RIOR.TraverseReader[context.Context](f)
}

View File

@@ -0,0 +1,333 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult_test
import (
"context"
"fmt"
RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/either"
F "github.com/IBM/fp-go/v2/function"
)
// Example_sequenceReader_basicUsage demonstrates the basic usage of SequenceReader
// to flip the parameter order, enabling point-free style programming.
func Example_sequenceReader_basicUsage() {
type Config struct {
Multiplier int
}
// A computation that produces a Reader based on context
getComputation := func(ctx context.Context) func() either.Either[error, func(Config) int] {
return func() either.Either[error, func(Config) int] {
// This could check context for cancellation, deadlines, etc.
return either.Right[error](func(cfg Config) int {
return cfg.Multiplier * 10
})
}
}
// Sequence it to flip the parameter order
// Now Config comes first, then context
sequenced := RIOE.SequenceReader(getComputation)
// Partially apply the Config - this is the key benefit for point-free style
cfg := Config{Multiplier: 5}
withConfig := sequenced(cfg)
// Now we have a ReaderIOResult[int] that can be used with any context
ctx := context.Background()
result := withConfig(ctx)()
if value, err := either.Unwrap(result); err == nil {
fmt.Println(value)
}
// Output: 50
}
// Example_sequenceReader_dependencyInjection demonstrates how SequenceReader
// enables clean dependency injection patterns in point-free style.
func Example_sequenceReader_dependencyInjection() {
// Define our dependencies
type Database struct {
ConnectionString string
}
type UserService struct {
db Database
}
// A function that creates a computation requiring a Database
makeQuery := func(ctx context.Context) func() either.Either[error, func(Database) string] {
return func() either.Either[error, func(Database) string] {
return either.Right[error](func(db Database) string {
return fmt.Sprintf("Querying %s", db.ConnectionString)
})
}
}
// Sequence to enable dependency injection
queryWithDB := RIOE.SequenceReader(makeQuery)
// Inject the database dependency
db := Database{ConnectionString: "localhost:5432"}
query := queryWithDB(db)
// Execute with context
ctx := context.Background()
result := query(ctx)()
if value, err := either.Unwrap(result); err == nil {
fmt.Println(value)
}
// Output: Querying localhost:5432
}
// Example_sequenceReader_pointFreeComposition demonstrates how SequenceReader
// enables point-free style composition of computations.
func Example_sequenceReader_pointFreeComposition() {
type Config struct {
BaseValue int
}
// Step 1: Create a computation that produces a Reader
step1 := func(ctx context.Context) func() either.Either[error, func(Config) int] {
return func() either.Either[error, func(Config) int] {
return either.Right[error](func(cfg Config) int {
return cfg.BaseValue * 2
})
}
}
// Step 2: Sequence it to enable partial application
sequenced := RIOE.SequenceReader(step1)
// Step 3: Build a pipeline using point-free style
// Partially apply the config
cfg := Config{BaseValue: 10}
// Create a reusable computation with the config baked in
computation := F.Pipe1(
sequenced(cfg),
RIOE.Map(func(x int) int { return x + 5 }),
)
// Execute the pipeline
ctx := context.Background()
result := computation(ctx)()
if value, err := either.Unwrap(result); err == nil {
fmt.Println(value)
}
// Output: 25
}
// Example_sequenceReader_multipleEnvironments demonstrates using SequenceReader
// to work with multiple environment types in a clean, composable way.
func Example_sequenceReader_multipleEnvironments() {
type DatabaseConfig struct {
Host string
Port int
}
type APIConfig struct {
Endpoint string
APIKey string
}
// Function that needs DatabaseConfig
getDatabaseURL := func(ctx context.Context) func() either.Either[error, func(DatabaseConfig) string] {
return func() either.Either[error, func(DatabaseConfig) string] {
return either.Right[error](func(cfg DatabaseConfig) string {
return fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
})
}
}
// Function that needs APIConfig
getAPIURL := func(ctx context.Context) func() either.Either[error, func(APIConfig) string] {
return func() either.Either[error, func(APIConfig) string] {
return either.Right[error](func(cfg APIConfig) string {
return cfg.Endpoint
})
}
}
// Sequence both to enable partial application
withDBConfig := RIOE.SequenceReader(getDatabaseURL)
withAPIConfig := RIOE.SequenceReader(getAPIURL)
// Partially apply different configs
dbCfg := DatabaseConfig{Host: "localhost", Port: 5432}
apiCfg := APIConfig{Endpoint: "https://api.example.com", APIKey: "secret"}
dbQuery := withDBConfig(dbCfg)
apiQuery := withAPIConfig(apiCfg)
// Execute both with the same context
ctx := context.Background()
dbResult := dbQuery(ctx)()
apiResult := apiQuery(ctx)()
if dbURL, err := either.Unwrap(dbResult); err == nil {
fmt.Println("Database:", dbURL)
}
if apiURL, err := either.Unwrap(apiResult); err == nil {
fmt.Println("API:", apiURL)
}
// Output:
// Database: localhost:5432
// API: https://api.example.com
}
// Example_sequenceReaderResult_errorHandling demonstrates how SequenceReaderResult
// enables point-free style with proper error handling at multiple levels.
func Example_sequenceReaderResult_errorHandling() {
type ValidationConfig struct {
MinValue int
MaxValue int
}
// A computation that can fail at both outer and inner levels
makeValidator := func(ctx context.Context) func() either.Either[error, func(context.Context) either.Either[error, int]] {
return func() either.Either[error, func(context.Context) either.Either[error, int]] {
// Outer level: check context
if ctx.Err() != nil {
return either.Left[func(context.Context) either.Either[error, int]](ctx.Err())
}
// Return inner computation
return either.Right[error](func(innerCtx context.Context) either.Either[error, int] {
// Inner level: perform validation
value := 42
if value < 0 {
return either.Left[int](fmt.Errorf("value too small: %d", value))
}
if value > 100 {
return either.Left[int](fmt.Errorf("value too large: %d", value))
}
return either.Right[error](value)
})
}
}
// Sequence to enable point-free composition
sequenced := RIOE.SequenceReaderResult(makeValidator)
// Build a pipeline with error handling
ctx := context.Background()
pipeline := F.Pipe2(
sequenced(ctx),
RIOE.Map(func(x int) int { return x * 2 }),
RIOE.Chain(func(x int) RIOE.ReaderIOResult[string] {
return RIOE.Of(fmt.Sprintf("Result: %d", x))
}),
)
result := pipeline(ctx)()
if value, err := either.Unwrap(result); err == nil {
fmt.Println(value)
}
// Output: Result: 84
}
// Example_sequenceReader_partialApplication demonstrates the power of partial
// application enabled by SequenceReader for building reusable computations.
func Example_sequenceReader_partialApplication() {
type ServiceConfig struct {
ServiceName string
Version string
}
// Create a computation factory
makeServiceInfo := func(ctx context.Context) func() either.Either[error, func(ServiceConfig) string] {
return func() either.Either[error, func(ServiceConfig) string] {
return either.Right[error](func(cfg ServiceConfig) string {
return fmt.Sprintf("%s v%s", cfg.ServiceName, cfg.Version)
})
}
}
// Sequence it
sequenced := RIOE.SequenceReader(makeServiceInfo)
// Create multiple service configurations
authConfig := ServiceConfig{ServiceName: "AuthService", Version: "1.0.0"}
userConfig := ServiceConfig{ServiceName: "UserService", Version: "2.1.0"}
// Partially apply each config to create specialized computations
getAuthInfo := sequenced(authConfig)
getUserInfo := sequenced(userConfig)
// These can now be reused across different contexts
ctx := context.Background()
authResult := getAuthInfo(ctx)()
userResult := getUserInfo(ctx)()
if auth, err := either.Unwrap(authResult); err == nil {
fmt.Println(auth)
}
if user, err := either.Unwrap(userResult); err == nil {
fmt.Println(user)
}
// Output:
// AuthService v1.0.0
// UserService v2.1.0
}
// Example_sequenceReader_testingBenefits demonstrates how SequenceReader
// makes testing easier by allowing you to inject test dependencies.
func Example_sequenceReader_testingBenefits() {
// Simple logger that collects messages
type SimpleLogger struct {
Messages []string
}
// A computation that depends on a logger (using the struct directly)
makeLoggingOperation := func(ctx context.Context) func() either.Either[error, func(*SimpleLogger) string] {
return func() either.Either[error, func(*SimpleLogger) string] {
return either.Right[error](func(logger *SimpleLogger) string {
logger.Messages = append(logger.Messages, "Operation started")
result := "Success"
logger.Messages = append(logger.Messages, fmt.Sprintf("Operation completed: %s", result))
return result
})
}
}
// Sequence to enable dependency injection
sequenced := RIOE.SequenceReader(makeLoggingOperation)
// Inject a test logger
testLogger := &SimpleLogger{Messages: []string{}}
operation := sequenced(testLogger)
// Execute
ctx := context.Background()
result := operation(ctx)()
if value, err := either.Unwrap(result); err == nil {
fmt.Println("Result:", value)
fmt.Println("Logs:", len(testLogger.Messages))
}
// Output:
// Result: Success
// Logs: 2
}

View File

@@ -0,0 +1,866 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"context"
"errors"
"fmt"
"testing"
"github.com/IBM/fp-go/v2/either"
"github.com/stretchr/testify/assert"
)
func TestSequenceReader(t *testing.T) {
t.Run("flips parameter order for simple types", func(t *testing.T) {
// Original: ReaderIOResult[Reader[string, int]]
// = func(context.Context) func() Either[error, func(string) int]
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
return either.Right[error](func(s string) int {
return 10 + len(s)
})
}
}
// Sequenced: func(string) func(context.Context) IOResult[int]
// The Reader environment (string) is now the first parameter
sequenced := SequenceReader(original)
ctx := context.Background()
// Test original
result1 := original(ctx)()
assert.True(t, either.IsRight(result1))
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1("hello")
assert.Equal(t, 15, value1)
// Test sequenced - note the flipped order: string first, then context
result2 := sequenced("hello")(ctx)()
assert.True(t, either.IsRight(result2))
value2, _ := either.Unwrap(result2)
assert.Equal(t, 15, value2)
})
t.Run("flips parameter order for struct types", func(t *testing.T) {
type Database struct {
ConnectionString string
}
// Original: ReaderIOResult[Reader[Database, string]]
query := func(ctx context.Context) func() Either[Reader[Database, string]] {
return func() Either[Reader[Database, string]] {
if ctx.Err() != nil {
return either.Left[Reader[Database, string]](ctx.Err())
}
return either.Right[error](func(db Database) string {
return fmt.Sprintf("Query on %s", db.ConnectionString)
})
}
}
db := Database{ConnectionString: "localhost:5432"}
ctx := context.Background()
expected := "Query on localhost:5432"
// Sequence it
sequenced := SequenceReader(query)
// Test original with valid inputs
result1 := query(ctx)()
assert.True(t, either.IsRight(result1))
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1(db)
assert.Equal(t, expected, value1)
// Test sequenced with valid inputs - Database first, then context
result2 := sequenced(db)(ctx)()
assert.True(t, either.IsRight(result2))
value2, _ := either.Unwrap(result2)
assert.Equal(t, expected, value2)
})
t.Run("preserves outer error", func(t *testing.T) {
expectedError := errors.New("outer error")
// Original that fails at outer level
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
return either.Left[Reader[string, int]](expectedError)
}
}
ctx := context.Background()
// Test original with error
result1 := original(ctx)()
assert.True(t, either.IsLeft(result1))
_, err1 := either.Unwrap(result1)
assert.Equal(t, expectedError, err1)
// Test sequenced - the outer error is preserved
sequenced := SequenceReader(original)
result2 := sequenced("test")(ctx)()
assert.True(t, either.IsLeft(result2))
_, err2 := either.Unwrap(result2)
assert.Equal(t, expectedError, err2)
})
t.Run("preserves computation logic", func(t *testing.T) {
// Original function
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
return either.Right[error](func(s string) int {
return 3 * len(s)
})
}
}
ctx := context.Background()
// Sequence
sequenced := SequenceReader(original)
// Test that sequence produces correct results
result1 := original(ctx)()
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1("test")
result2 := sequenced("test")(ctx)()
value2, _ := either.Unwrap(result2)
assert.Equal(t, value1, value2)
assert.Equal(t, 12, value2) // 3 * 4
})
t.Run("works with zero values", func(t *testing.T) {
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
return either.Right[error](func(s string) int {
return len(s)
})
}
}
ctx := context.Background()
sequenced := SequenceReader(original)
// Test with zero values
result1 := original(ctx)()
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1("")
assert.Equal(t, 0, value1)
result2 := sequenced("")(ctx)()
value2, _ := either.Unwrap(result2)
assert.Equal(t, 0, value2)
})
t.Run("respects context cancellation", func(t *testing.T) {
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
if ctx.Err() != nil {
return either.Left[Reader[string, int]](ctx.Err())
}
return either.Right[error](func(s string) int {
return len(s)
})
}
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
sequenced := SequenceReader(original)
result := sequenced("test")(ctx)()
assert.True(t, either.IsLeft(result))
_, err := either.Unwrap(result)
assert.Equal(t, context.Canceled, err)
})
t.Run("enables point-free style with partial application", func(t *testing.T) {
type Config struct {
Multiplier int
}
// Original computation
original := func(ctx context.Context) func() Either[Reader[Config, int]] {
return func() Either[Reader[Config, int]] {
return either.Right[error](func(cfg Config) int {
return cfg.Multiplier * 10
})
}
}
// Sequence to enable partial application
sequenced := SequenceReader(original)
// Partially apply the Config
cfg := Config{Multiplier: 5}
withConfig := sequenced(cfg)
// Now we have a ReaderIOResult[int] that can be used in different contexts
ctx1 := context.Background()
result1 := withConfig(ctx1)()
assert.True(t, either.IsRight(result1))
value1, _ := either.Unwrap(result1)
assert.Equal(t, 50, value1)
// Can reuse with different context
ctx2 := context.Background()
result2 := withConfig(ctx2)()
assert.True(t, either.IsRight(result2))
value2, _ := either.Unwrap(result2)
assert.Equal(t, 50, value2)
})
}
func TestSequenceReaderIO(t *testing.T) {
t.Run("flips parameter order for simple types", func(t *testing.T) {
// Original: ReaderIOResult[ReaderIO[int]]
// = func(context.Context) func() Either[error, func(context.Context) func() int]
original := func(ctx context.Context) func() Either[ReaderIO[int]] {
return func() Either[ReaderIO[int]] {
return either.Right[error](func(innerCtx context.Context) func() int {
return func() int {
return 20
}
})
}
}
ctx := context.Background()
sequenced := SequenceReaderIO(original)
// Test original
result1 := original(ctx)()
assert.True(t, either.IsRight(result1))
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1(ctx)()
assert.Equal(t, 20, value1)
// Test sequenced - context first, then context again for inner ReaderIO
result2 := sequenced(ctx)(ctx)()
assert.True(t, either.IsRight(result2))
value2, _ := either.Unwrap(result2)
assert.Equal(t, 20, value2)
})
t.Run("preserves outer error", func(t *testing.T) {
expectedError := errors.New("outer error")
// Original that fails at outer level
original := func(ctx context.Context) func() Either[ReaderIO[int]] {
return func() Either[ReaderIO[int]] {
return either.Left[ReaderIO[int]](expectedError)
}
}
ctx := context.Background()
// Test original with error
result1 := original(ctx)()
assert.True(t, either.IsLeft(result1))
_, err1 := either.Unwrap(result1)
assert.Equal(t, expectedError, err1)
// Test sequenced - the outer error is preserved
sequenced := SequenceReaderIO(original)
result2 := sequenced(ctx)(ctx)()
assert.True(t, either.IsLeft(result2))
_, err2 := either.Unwrap(result2)
assert.Equal(t, expectedError, err2)
})
t.Run("respects context cancellation in outer context", func(t *testing.T) {
original := func(ctx context.Context) func() Either[ReaderIO[int]] {
return func() Either[ReaderIO[int]] {
if ctx.Err() != nil {
return either.Left[ReaderIO[int]](ctx.Err())
}
return either.Right[error](func(innerCtx context.Context) func() int {
return func() int {
return 20
}
})
}
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
sequenced := SequenceReaderIO(original)
result := sequenced(ctx)(ctx)()
assert.True(t, either.IsLeft(result))
_, err := either.Unwrap(result)
assert.Equal(t, context.Canceled, err)
})
}
func TestSequenceReaderResult(t *testing.T) {
t.Run("flips parameter order for simple types", func(t *testing.T) {
// Original: ReaderIOResult[ReaderResult[int]]
// = func(context.Context) func() Either[error, func(context.Context) Either[error, int]]
original := func(ctx context.Context) func() Either[ReaderResult[int]] {
return func() Either[ReaderResult[int]] {
return either.Right[error](func(innerCtx context.Context) Either[int] {
return either.Right[error](20)
})
}
}
ctx := context.Background()
sequenced := SequenceReaderResult(original)
// Test original
result1 := original(ctx)()
assert.True(t, either.IsRight(result1))
innerFunc1, _ := either.Unwrap(result1)
innerResult1 := innerFunc1(ctx)
assert.True(t, either.IsRight(innerResult1))
value1, _ := either.Unwrap(innerResult1)
assert.Equal(t, 20, value1)
// Test sequenced
result2 := sequenced(ctx)(ctx)()
assert.True(t, either.IsRight(result2))
value2, _ := either.Unwrap(result2)
assert.Equal(t, 20, value2)
})
t.Run("preserves outer error", func(t *testing.T) {
expectedError := errors.New("outer error")
// Original that fails at outer level
original := func(ctx context.Context) func() Either[ReaderResult[int]] {
return func() Either[ReaderResult[int]] {
return either.Left[ReaderResult[int]](expectedError)
}
}
ctx := context.Background()
// Test original with error
result1 := original(ctx)()
assert.True(t, either.IsLeft(result1))
_, err1 := either.Unwrap(result1)
assert.Equal(t, expectedError, err1)
// Test sequenced - the outer error is preserved
sequenced := SequenceReaderResult(original)
result2 := sequenced(ctx)(ctx)()
assert.True(t, either.IsLeft(result2))
_, err2 := either.Unwrap(result2)
assert.Equal(t, expectedError, err2)
})
t.Run("preserves inner error", func(t *testing.T) {
expectedError := errors.New("inner error")
// Original that fails at inner level
original := func(ctx context.Context) func() Either[ReaderResult[int]] {
return func() Either[ReaderResult[int]] {
return either.Right[error](func(innerCtx context.Context) Either[int] {
return either.Left[int](expectedError)
})
}
}
ctx := context.Background()
// Test original with inner error
result1 := original(ctx)()
assert.True(t, either.IsRight(result1))
innerFunc1, _ := either.Unwrap(result1)
innerResult1 := innerFunc1(ctx)
assert.True(t, either.IsLeft(innerResult1))
_, innerErr1 := either.Unwrap(innerResult1)
assert.Equal(t, expectedError, innerErr1)
// Test sequenced with inner error
sequenced := SequenceReaderResult(original)
result2 := sequenced(ctx)(ctx)()
assert.True(t, either.IsLeft(result2))
_, innerErr2 := either.Unwrap(result2)
assert.Equal(t, expectedError, innerErr2)
})
t.Run("handles errors at different levels", func(t *testing.T) {
// Original that can fail at both levels
makeOriginal := func(x int) ReaderIOResult[ReaderResult[int]] {
return func(ctx context.Context) func() Either[ReaderResult[int]] {
return func() Either[ReaderResult[int]] {
if x < -10 {
return either.Left[ReaderResult[int]](errors.New("outer: too negative"))
}
return either.Right[error](func(innerCtx context.Context) Either[int] {
if x < 0 {
return either.Left[int](errors.New("inner: negative value"))
}
return either.Right[error](x * 2)
})
}
}
}
ctx := context.Background()
// Test outer error
sequenced1 := SequenceReaderResult(makeOriginal(-20))
result1 := sequenced1(ctx)(ctx)()
assert.True(t, either.IsLeft(result1))
_, err1 := either.Unwrap(result1)
assert.Contains(t, err1.Error(), "outer")
// Test inner error
sequenced2 := SequenceReaderResult(makeOriginal(-5))
result2 := sequenced2(ctx)(ctx)()
assert.True(t, either.IsLeft(result2))
_, err2 := either.Unwrap(result2)
assert.Contains(t, err2.Error(), "inner")
// Test success
sequenced3 := SequenceReaderResult(makeOriginal(10))
result3 := sequenced3(ctx)(ctx)()
assert.True(t, either.IsRight(result3))
value3, _ := either.Unwrap(result3)
assert.Equal(t, 20, value3)
})
t.Run("respects context cancellation", func(t *testing.T) {
original := func(ctx context.Context) func() Either[ReaderResult[int]] {
return func() Either[ReaderResult[int]] {
if ctx.Err() != nil {
return either.Left[ReaderResult[int]](ctx.Err())
}
return either.Right[error](func(innerCtx context.Context) Either[int] {
if innerCtx.Err() != nil {
return either.Left[int](innerCtx.Err())
}
return either.Right[error](20)
})
}
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
sequenced := SequenceReaderResult(original)
result := sequenced(ctx)(ctx)()
assert.True(t, either.IsLeft(result))
_, err := either.Unwrap(result)
assert.Equal(t, context.Canceled, err)
})
}
func TestSequenceEdgeCases(t *testing.T) {
t.Run("works with empty struct", func(t *testing.T) {
type Empty struct{}
original := func(ctx context.Context) func() Either[Reader[Empty, int]] {
return func() Either[Reader[Empty, int]] {
return either.Right[error](func(e Empty) int {
return 20
})
}
}
ctx := context.Background()
empty := Empty{}
sequenced := SequenceReader(original)
result1 := original(ctx)()
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1(empty)
assert.Equal(t, 20, value1)
result2 := sequenced(empty)(ctx)()
value2, _ := either.Unwrap(result2)
assert.Equal(t, 20, value2)
})
t.Run("works with pointer types", func(t *testing.T) {
type Data struct {
Value int
}
original := func(ctx context.Context) func() Either[Reader[*Data, int]] {
return func() Either[Reader[*Data, int]] {
return either.Right[error](func(d *Data) int {
if d == nil {
return 42
}
return 42 + d.Value
})
}
}
ctx := context.Background()
data := &Data{Value: 100}
sequenced := SequenceReader(original)
// Test with non-nil pointer
result1 := original(ctx)()
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1(data)
assert.Equal(t, 142, value1)
result2 := sequenced(data)(ctx)()
value2, _ := either.Unwrap(result2)
assert.Equal(t, 142, value2)
// Test with nil pointer
result3 := sequenced(nil)(ctx)()
value3, _ := either.Unwrap(result3)
assert.Equal(t, 42, value3)
})
t.Run("maintains referential transparency", func(t *testing.T) {
// The same inputs should always produce the same outputs
original := func(ctx context.Context) func() Either[Reader[string, int]] {
return func() Either[Reader[string, int]] {
return either.Right[error](func(s string) int {
return 10 + len(s)
})
}
}
ctx := context.Background()
sequenced := SequenceReader(original)
// Call multiple times with same inputs
for range 5 {
result1 := original(ctx)()
innerFunc1, _ := either.Unwrap(result1)
value1 := innerFunc1("hello")
assert.Equal(t, 15, value1)
result2 := sequenced("hello")(ctx)()
value2, _ := either.Unwrap(result2)
assert.Equal(t, 15, value2)
}
})
}
func TestTraverseReader(t *testing.T) {
t.Run("basic transformation with Reader dependency", func(t *testing.T) {
type Config struct {
Multiplier int
}
// Original computation
original := Right(10)
// Reader-based transformation
multiply := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x * cfg.Multiplier
}
}
// Apply TraverseReader
traversed := TraverseReader(multiply)
result := traversed(original)
// Provide Config and execute
cfg := Config{Multiplier: 5}
ctx := context.Background()
finalResult := result(cfg)(ctx)()
assert.True(t, either.IsRight(finalResult))
value, _ := either.Unwrap(finalResult)
assert.Equal(t, 50, value)
})
t.Run("preserves outer error", func(t *testing.T) {
type Config struct {
Multiplier int
}
expectedError := errors.New("computation failed")
// Original computation that fails
original := Left[int](expectedError)
// Reader-based transformation (won't be called)
multiply := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x * cfg.Multiplier
}
}
// Apply TraverseReader
traversed := TraverseReader(multiply)
result := traversed(original)
// Provide Config and execute
cfg := Config{Multiplier: 5}
ctx := context.Background()
finalResult := result(cfg)(ctx)()
assert.True(t, either.IsLeft(finalResult))
_, err := either.Unwrap(finalResult)
assert.Equal(t, expectedError, err)
})
t.Run("works with different types", func(t *testing.T) {
type Database struct {
Prefix string
}
// Original computation producing an int
original := Right(42)
// Reader-based transformation: int -> string using Database
format := func(x int) func(Database) string {
return func(db Database) string {
return fmt.Sprintf("%s:%d", db.Prefix, x)
}
}
// Apply TraverseReader
traversed := TraverseReader(format)
result := traversed(original)
// Provide Database and execute
db := Database{Prefix: "ID"}
ctx := context.Background()
finalResult := result(db)(ctx)()
assert.True(t, either.IsRight(finalResult))
value, _ := either.Unwrap(finalResult)
assert.Equal(t, "ID:42", value)
})
t.Run("works with struct environments", func(t *testing.T) {
type Settings struct {
Prefix string
Suffix string
}
// Original computation
original := Right("value")
// Reader-based transformation using Settings
decorate := func(s string) func(Settings) string {
return func(settings Settings) string {
return settings.Prefix + s + settings.Suffix
}
}
// Apply TraverseReader
traversed := TraverseReader(decorate)
result := traversed(original)
// Provide Settings and execute
settings := Settings{Prefix: "[", Suffix: "]"}
ctx := context.Background()
finalResult := result(settings)(ctx)()
assert.True(t, either.IsRight(finalResult))
value, _ := either.Unwrap(finalResult)
assert.Equal(t, "[value]", value)
})
t.Run("enables partial application", func(t *testing.T) {
type Config struct {
Factor int
}
// Original computation
original := Right(10)
// Reader-based transformation
scale := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x * cfg.Factor
}
}
// Apply TraverseReader
traversed := TraverseReader(scale)
result := traversed(original)
// Partially apply Config
cfg := Config{Factor: 3}
withConfig := result(cfg)
// Can now use with different contexts
ctx1 := context.Background()
finalResult1 := withConfig(ctx1)()
assert.True(t, either.IsRight(finalResult1))
value1, _ := either.Unwrap(finalResult1)
assert.Equal(t, 30, value1)
// Reuse with different context
ctx2 := context.Background()
finalResult2 := withConfig(ctx2)()
assert.True(t, either.IsRight(finalResult2))
value2, _ := either.Unwrap(finalResult2)
assert.Equal(t, 30, value2)
})
t.Run("respects context cancellation", func(t *testing.T) {
type Config struct {
Value int
}
// Original computation that checks context
original := func(ctx context.Context) func() Either[int] {
return func() Either[int] {
if ctx.Err() != nil {
return either.Left[int](ctx.Err())
}
return either.Right[error](10)
}
}
// Reader-based transformation
multiply := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x * cfg.Value
}
}
// Apply TraverseReader
traversed := TraverseReader(multiply)
result := traversed(original)
// Use canceled context
ctx, cancel := context.WithCancel(context.Background())
cancel()
cfg := Config{Value: 5}
finalResult := result(cfg)(ctx)()
assert.True(t, either.IsLeft(finalResult))
_, err := either.Unwrap(finalResult)
assert.Equal(t, context.Canceled, err)
})
t.Run("works with zero values", func(t *testing.T) {
type Config struct {
Offset int
}
// Original computation with zero value
original := Right(0)
// Reader-based transformation
add := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x + cfg.Offset
}
}
// Apply TraverseReader
traversed := TraverseReader(add)
result := traversed(original)
// Provide Config with zero offset
cfg := Config{Offset: 0}
ctx := context.Background()
finalResult := result(cfg)(ctx)()
assert.True(t, either.IsRight(finalResult))
value, _ := either.Unwrap(finalResult)
assert.Equal(t, 0, value)
})
t.Run("chains multiple transformations", func(t *testing.T) {
type Config struct {
Multiplier int
}
// Original computation
original := Right(5)
// First Reader-based transformation
multiply := func(x int) Reader[Config, int] {
return func(cfg Config) int {
return x * cfg.Multiplier
}
}
// Apply TraverseReader
traversed := TraverseReader(multiply)
result := traversed(original)
// Provide Config and execute
cfg := Config{Multiplier: 4}
ctx := context.Background()
finalResult := result(cfg)(ctx)()
assert.True(t, either.IsRight(finalResult))
value, _ := either.Unwrap(finalResult)
assert.Equal(t, 20, value) // 5 * 4 = 20
})
t.Run("works with complex Reader logic", func(t *testing.T) {
type ValidationRules struct {
MinValue int
MaxValue int
}
// Original computation
original := Right(50)
// Reader-based transformation with validation logic
validate := func(x int) func(ValidationRules) int {
return func(rules ValidationRules) int {
if x < rules.MinValue {
return rules.MinValue
}
if x > rules.MaxValue {
return rules.MaxValue
}
return x
}
}
// Apply TraverseReader
traversed := TraverseReader(validate)
result := traversed(original)
// Test with value within range
rules1 := ValidationRules{MinValue: 0, MaxValue: 100}
ctx := context.Background()
finalResult1 := result(rules1)(ctx)()
assert.True(t, either.IsRight(finalResult1))
value1, _ := either.Unwrap(finalResult1)
assert.Equal(t, 50, value1)
// Test with value above max
rules2 := ValidationRules{MinValue: 0, MaxValue: 30}
finalResult2 := result(rules2)(ctx)()
assert.True(t, either.IsRight(finalResult2))
value2, _ := either.Unwrap(finalResult2)
assert.Equal(t, 30, value2) // Clamped to max
// Test with value below min
rules3 := ValidationRules{MinValue: 60, MaxValue: 100}
finalResult3 := result(rules3)(ctx)()
assert.True(t, either.IsRight(finalResult3))
value3, _ := either.Unwrap(finalResult3)
assert.Equal(t, 60, value3) // Clamped to min
})
}

View File

@@ -53,12 +53,12 @@ import (
 	RIOE "github.com/IBM/fp-go/v2/context/readerioresult"
 	RIOEH "github.com/IBM/fp-go/v2/context/readerioresult/http"
-	E "github.com/IBM/fp-go/v2/either"
 	F "github.com/IBM/fp-go/v2/function"
 	R "github.com/IBM/fp-go/v2/http/builder"
 	H "github.com/IBM/fp-go/v2/http/headers"
 	LZ "github.com/IBM/fp-go/v2/lazy"
 	O "github.com/IBM/fp-go/v2/option"
+	"github.com/IBM/fp-go/v2/result"
 )
 
 // Requester converts an http/builder.Builder into a ReaderIOResult that produces HTTP requests.
@@ -143,10 +143,10 @@ func Requester(builder *R.Builder) RIOEH.Requester {
 	return F.Pipe5(
 		builder.GetBody(),
-		O.Fold(LZ.Of(E.Of[error](withoutBody)), E.Map[error](withBody)),
-		E.Ap[func(string) RIOE.ReaderIOResult[*http.Request]](builder.GetTargetURL()),
-		E.Flap[error, RIOE.ReaderIOResult[*http.Request]](builder.GetMethod()),
-		E.GetOrElse(RIOE.Left[*http.Request]),
+		O.Fold(LZ.Of(result.Of(withoutBody)), result.Map(withBody)),
+		result.Ap[RIOE.Kleisli[string, *http.Request]](builder.GetTargetURL()),
+		result.Flap[RIOE.ReaderIOResult[*http.Request]](builder.GetMethod()),
+		result.GetOrElse(RIOE.Left[*http.Request]),
 		RIOE.Map(func(req *http.Request) *http.Request {
 			req.Header = H.Monoid.Concat(req.Header, builder.GetHeaders())
 			return req

View File

@@ -73,7 +73,7 @@ type (
 	// It wraps a standard http.Client and provides functional HTTP operations.
 	client struct {
 		delegate *http.Client
-		doIOE    func(*http.Request) IOE.IOEither[error, *http.Response]
+		doIOE    IOE.Kleisli[error, *http.Request, *http.Response]
 	}
 )
@@ -158,7 +158,7 @@ func MakeClient(httpClient *http.Client) Client {
 //	request := MakeGetRequest("https://api.example.com/data")
 //	fullResp := ReadFullResponse(client)(request)
 //	result := fullResp(context.Background())()
-func ReadFullResponse(client Client) func(Requester) RIOE.ReaderIOResult[H.FullResponse] {
+func ReadFullResponse(client Client) RIOE.Kleisli[Requester, H.FullResponse] {
 	return func(req Requester) RIOE.ReaderIOResult[H.FullResponse] {
 		return F.Flow3(
 			client.Do(req),
@@ -195,7 +195,7 @@ func ReadFullResponse(client Client) func(Requester) RIOE.ReaderIOResult[H.FullR
 //	request := MakeGetRequest("https://api.example.com/data")
 //	readBytes := ReadAll(client)
 //	result := readBytes(request)(context.Background())()
-func ReadAll(client Client) func(Requester) RIOE.ReaderIOResult[[]byte] {
+func ReadAll(client Client) RIOE.Kleisli[Requester, []byte] {
 	return F.Flow2(
 		ReadFullResponse(client),
 		RIOE.Map(H.Body),
@@ -219,7 +219,7 @@ func ReadAll(client Client) func(Requester) RIOE.ReaderIOResult[[]byte] {
 //	request := MakeGetRequest("https://api.example.com/text")
 //	readText := ReadText(client)
 //	result := readText(request)(context.Background())()
-func ReadText(client Client) func(Requester) RIOE.ReaderIOResult[string] {
+func ReadText(client Client) RIOE.Kleisli[Requester, string] {
 	return F.Flow2(
 		ReadAll(client),
 		RIOE.Map(B.ToString),
@@ -231,7 +231,7 @@ func ReadText(client Client) func(Requester) RIOE.ReaderIOResult[string] {
 // Deprecated: Use [ReadJSON] instead. This function is kept for backward compatibility
 // but will be removed in a future version. The capitalized version follows Go naming
 // conventions for acronyms.
-func ReadJson[A any](client Client) func(Requester) RIOE.ReaderIOResult[A] {
+func ReadJson[A any](client Client) RIOE.Kleisli[Requester, A] {
 	return ReadJSON[A](client)
 }
@@ -242,7 +242,7 @@ func ReadJson[A any](client Client) func(Requester) RIOE.ReaderIOResult[A] {
 //  3. Reads the response body as bytes
 //
 // This function is used internally by ReadJSON to ensure proper JSON response handling.
-func readJSON(client Client) func(Requester) RIOE.ReaderIOResult[[]byte] {
+func readJSON(client Client) RIOE.Kleisli[Requester, []byte] {
 	return F.Flow3(
 		ReadFullResponse(client),
 		RIOE.ChainFirstEitherK(F.Flow2(
@@ -278,7 +278,7 @@ func readJSON(client Client) func(Requester) RIOE.ReaderIOResult[[]byte] {
 //	request := MakeGetRequest("https://api.example.com/user/1")
 //	readUser := ReadJSON[User](client)
 //	result := readUser(request)(context.Background())()
-func ReadJSON[A any](client Client) func(Requester) RIOE.ReaderIOResult[A] {
+func ReadJSON[A any](client Client) RIOE.Kleisli[Requester, A] {
 	return F.Flow2(
 		readJSON(client),
 		RIOE.ChainEitherK(J.Unmarshal[A]),

View File

@@ -0,0 +1,732 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package readerioresult provides logging utilities for ReaderIOResult computations.
// It includes functions for entry/exit logging with timing, correlation IDs, and context management.
package readerioresult
import (
"context"
"log/slog"
"sync/atomic"
"time"
"github.com/IBM/fp-go/v2/context/readerio"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/logging"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/result"
)
type (
// loggingContextKeyType is the type used as a key for storing logging information in context.Context
loggingContextKeyType int
// LoggingID is a unique identifier assigned to each logged operation for correlation
LoggingID uint64
// loggingContext holds the logging state for a computation, including timing,
// correlation ID, logger instance, and whether logging is enabled.
loggingContext struct {
contextID LoggingID // Unique identifier for this logged operation
startTime time.Time // When the operation started (for duration calculation)
logger *slog.Logger // The logger instance to use for this operation
isEnabled bool // Whether logging is enabled for this operation
}
)
var (
// loggingContextKey is the singleton key used to store/retrieve logging data from context
loggingContextKey loggingContextKeyType
// loggingCounter is an atomic counter that generates unique LoggingIDs
loggingCounter atomic.Uint64
loggingContextValue = F.Bind2nd(context.Context.Value, any(loggingContextKey))
withLoggingContextValue = F.Bind2of3(context.WithValue)(any(loggingContextKey))
// getLoggingContext retrieves the logging state (correlation ID, start time, logger, enablement) from the context.
// It returns the stored loggingContext, falling back to the default logging context when none is present.
getLoggingContext = F.Flow3(
loggingContextValue,
option.ToType[loggingContext],
option.GetOrElse(getDefaultLoggingContext),
)
)
// getDefaultLoggingContext returns a default logging context with the global logger.
// This is used when no logging context is found in the context.Context.
func getDefaultLoggingContext() loggingContext {
return loggingContext{
logger: logging.GetLogger(),
}
}
// withLoggingContext creates an endomorphism that adds a logging context to a context.Context.
// This is used internally to store logging state in the context for retrieval by nested operations.
//
// Parameters:
// - lctx: The logging context to store
//
// Returns:
// - An endomorphism that adds the logging context to a context.Context
func withLoggingContext(lctx loggingContext) Endomorphism[context.Context] {
return F.Bind2nd(withLoggingContextValue, any(lctx))
}
// LogEntryExitF creates a customizable operator that wraps a ReaderIOResult computation with entry/exit callbacks.
//
// This is a more flexible version of LogEntryExit that allows you to provide custom callbacks for
// entry and exit events. The onEntry callback receives the current context and can return a modified
// context (e.g., with additional logging information). The onExit callback receives the computation
// result and can perform custom logging, metrics collection, or cleanup.
//
// The function uses the bracket pattern to ensure that:
// - The onEntry callback is executed before the computation starts
// - The computation runs with the context returned by onEntry
// - The onExit callback is executed after the computation completes (success or failure)
// - The original result is preserved and returned unchanged
// - Cleanup happens even if the computation fails
//
// Type Parameters:
// - A: The success type of the ReaderIOResult
// - ANY: The return type of the onExit callback (typically any)
//
// Parameters:
// - onEntry: A ReaderIO that receives the current context and returns a (possibly modified) context.
// This is executed before the computation starts. Use this for logging entry, adding context values,
// starting timers, or initialization logic.
// - onExit: A Kleisli function that receives the Result[A] and returns a ReaderIO[ANY].
// This is executed after the computation completes, regardless of success or failure.
// Use this for logging exit, recording metrics, cleanup, or finalization logic.
//
// Returns:
// - An Operator that wraps the ReaderIOResult computation with the custom entry/exit callbacks
//
// Example with custom context modification:
//
// type RequestID string
//
// logOp := LogEntryExitF[User, any](
// func(ctx context.Context) IO[context.Context] {
// return func() context.Context {
// reqID := RequestID(uuid.New().String())
// log.Printf("[%s] Starting operation", reqID)
// return context.WithValue(ctx, "requestID", reqID)
// }
// },
// func(res Result[User]) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// reqID := ctx.Value("requestID").(RequestID)
// return F.Pipe1(
// res,
// result.Fold(
// func(err error) any {
// log.Printf("[%s] Operation failed: %v", reqID, err)
// return nil
// },
// func(_ User) any {
// log.Printf("[%s] Operation succeeded", reqID)
// return nil
// },
// ),
// )
// }
// }
// },
// )
//
// wrapped := logOp(fetchUser(123))
//
// Example with metrics collection:
//
// import "github.com/prometheus/client_golang/prometheus"
//
// metricsOp := LogEntryExitF[Response, any](
// func(ctx context.Context) IO[context.Context] {
// return func() context.Context {
// requestCount.WithLabelValues("api_call", "started").Inc()
// return context.WithValue(ctx, "startTime", time.Now())
// }
// },
// func(res Result[Response]) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// startTime := ctx.Value("startTime").(time.Time)
// duration := time.Since(startTime).Seconds()
//
// return F.Pipe1(
// res,
// result.Fold(
// func(err error) any {
// requestCount.WithLabelValues("api_call", "error").Inc()
// requestDuration.WithLabelValues("api_call", "error").Observe(duration)
// return nil
// },
// func(_ Response) any {
// requestCount.WithLabelValues("api_call", "success").Inc()
// requestDuration.WithLabelValues("api_call", "success").Observe(duration)
// return nil
// },
// ),
// )
// }
// }
// },
// )
//
// Use Cases:
// - Custom context modification: Adding request IDs, trace IDs, or other context values
// - Structured logging: Integration with zap, logrus, or other structured loggers
// - Metrics collection: Recording operation durations, success/failure rates
// - Distributed tracing: OpenTelemetry, Jaeger integration
// - Custom monitoring: Application-specific monitoring and alerting
//
// Note: LogEntryExit is implemented using LogEntryExitF with standard logging and context management.
// Use LogEntryExitF when you need more control over the entry/exit behavior or context modification.
func LogEntryExitF[A, ANY any](
onEntry ReaderIO[context.Context],
onExit readerio.Kleisli[Result[A], ANY],
) Operator[A, A] {
bracket := F.Bind13of3(readerio.Bracket[context.Context, Result[A], ANY])(onEntry, func(newCtx context.Context, res Result[A]) ReaderIO[ANY] {
return readerio.FromIO(onExit(res)(newCtx)) // Get the exit callback for this result
})
return func(src ReaderIOResult[A]) ReaderIOResult[A] {
return bracket(F.Flow2(
src,
FromIOResult,
))
}
}
// onEntry creates a ReaderIO that handles the entry logging for an operation.
// It generates a unique logging ID, captures the start time, and logs the entry message.
// The logging context is stored in the context.Context for later retrieval.
//
// Parameters:
// - logLevel: The slog.Level to use for logging (e.g., slog.LevelInfo, slog.LevelDebug)
// - cb: Callback function to retrieve the logger from the context
// - nameAttr: The slog.Attr containing the operation name
//
// Returns:
// - A ReaderIO that prepares the context with logging information and logs the entry
func onEntry(
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
nameAttr slog.Attr,
) ReaderIO[context.Context] {
return func(ctx context.Context) IO[context.Context] {
// logger
logger := cb(ctx)
return func() context.Context {
// check if the logger is enabled
if logger.Enabled(ctx, logLevel) {
// Generate unique logging ID and capture start time
contextID := LoggingID(loggingCounter.Add(1))
startTime := time.Now()
newLogger := logger.With("ID", contextID)
// log using ID
newLogger.LogAttrs(ctx, logLevel, "[entering]", nameAttr)
withCtx := withLoggingContext(loggingContext{
contextID: contextID,
startTime: startTime,
logger: newLogger,
isEnabled: true,
})
withLogger := logging.WithLogger(newLogger)
return withCtx(withLogger(ctx))
}
// logging disabled
withCtx := withLoggingContext(loggingContext{
logger: logger,
isEnabled: false,
})
return withCtx(ctx)
}
}
}
// onExitAny creates a Kleisli function that handles exit logging for an operation.
// It logs either success or error based on the Result, including the operation duration.
// Only logs if logging was enabled during entry (checked via loggingContext.isEnabled).
//
// Parameters:
// - logLevel: The slog.Level to use for logging
// - nameAttr: The slog.Attr containing the operation name
//
// Returns:
// - A Kleisli function that logs the exit/error and returns nil
func onExitAny(
logLevel slog.Level,
nameAttr slog.Attr,
) readerio.Kleisli[Result[any], any] {
return func(res Result[any]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
value := getLoggingContext(ctx)
if value.isEnabled {
return func() any {
// Retrieve logging information from context
durationAttr := slog.Duration("duration", time.Since(value.startTime))
// Log error with ID and duration
onError := func(err error) any {
value.logger.LogAttrs(ctx, logLevel, "[throwing]",
nameAttr,
durationAttr,
slog.Any("error", err))
return nil
}
// Log success with ID and duration
onSuccess := func(_ any) any {
value.logger.LogAttrs(ctx, logLevel, "[exiting ]", nameAttr, durationAttr)
return nil
}
return F.Pipe1(
res,
result.Fold(onError, onSuccess),
)
}
}
// nothing to do
return io.Of[any](nil)
}
}
}
// LogEntryExitWithCallback creates an operator that logs entry and exit of a ReaderIOResult computation
// using a custom logger callback and log level. This provides more control than LogEntryExit.
//
// This function allows you to:
// - Use a custom log level (Debug, Info, Warn, Error)
// - Retrieve the logger from the context using a custom callback
// - Control whether logging is enabled based on the logger's configuration
//
// Type Parameters:
// - A: The success type of the ReaderIOResult
//
// Parameters:
// - logLevel: The slog.Level to use for all log messages (entry, exit, error)
// - cb: Callback function to retrieve the *slog.Logger from the context
// - name: A descriptive name for the operation
//
// Returns:
// - An Operator that wraps the ReaderIOResult with customizable logging
//
// Example with custom log level:
//
// // Log at debug level
// debugOp := LogEntryExitWithCallback[User](
// slog.LevelDebug,
// logging.GetLoggerFromContext,
// "fetchUser",
// )
// result := debugOp(fetchUser(123))
//
// Example with custom logger callback:
//
// type loggerKey int
// const myLoggerKey loggerKey = 0
//
// getMyLogger := func(ctx context.Context) *slog.Logger {
// if logger := ctx.Value(myLoggerKey); logger != nil {
// return logger.(*slog.Logger)
// }
// return slog.Default()
// }
//
// customOp := LogEntryExitWithCallback[Data](
// slog.LevelInfo,
// getMyLogger,
// "processData",
// )
func LogEntryExitWithCallback[A any](
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
name string) Operator[A, A] {
nameAttr := slog.String("name", name)
return LogEntryExitF(
onEntry(logLevel, cb, nameAttr),
F.Flow2(
result.MapTo[A, any](nil),
onExitAny(logLevel, nameAttr),
),
)
}
// LogEntryExit creates an operator that logs the entry and exit of a ReaderIOResult computation with timing and correlation IDs.
//
// This function wraps a ReaderIOResult computation with automatic logging that tracks:
// - Entry: Logs "[entering]" when the computation starts, with the operation name and a correlation ID
// - Exit: Logs "[exiting ]" when the computation completes successfully, with name, ID, and duration
// - Error: Logs "[throwing]" when the computation fails, with name, ID, duration, and the error
//
// Each logged operation is assigned a unique LoggingID (a monotonically increasing counter) that
// appears in all log messages for that operation. This ID enables correlation of entry and exit
// logs, even when multiple operations are running concurrently or are interleaved.
//
// The logging information (start time and ID) is stored in the context and can be retrieved using
// getLoggingContext or getLoggingID. This allows nested operations to access the parent operation's
// logging information.
//
// Type Parameters:
// - A: The success type of the ReaderIOResult
//
// Parameters:
// - name: A descriptive name for the computation, used in log messages to identify the operation
//
// Returns:
// - An Operator that wraps the ReaderIOResult computation with entry/exit logging
//
// The function uses the bracket pattern to ensure that:
// - Entry is logged before the computation starts
// - A unique LoggingID is assigned and stored in the context
// - Exit/error is logged after the computation completes, regardless of success or failure
// - Timing is accurate, measuring from entry to exit
// - The original result is preserved and returned unchanged
//
// Log Format (the ID, name, duration, and error are emitted as structured slog attributes):
// - Entry: msg="[entering]" ID=<id> name=<name>
// - Success: msg="[exiting ]" ID=<id> name=<name> duration=<duration>
// - Error: msg="[throwing]" ID=<id> name=<name> duration=<duration> error=<error>
//
// Example with successful computation:
//
// fetchUser := func(id int) ReaderIOResult[User] {
// return Of(User{ID: id, Name: "Alice"})
// }
//
// // Wrap with logging
// loggedFetch := LogEntryExit[User]("fetchUser")(fetchUser(123))
//
// // Execute
// result := loggedFetch(context.Background())()
// // Logs:
// // [entering 1] fetchUser
// // [exiting 1] fetchUser [0.1s]
//
// Example with error:
//
// failingOp := func() ReaderIOResult[string] {
// return Left[string](errors.New("connection timeout"))
// }
//
// logged := LogEntryExit[string]("failingOp")(failingOp())
// result := logged(context.Background())()
// // Logs:
// // [entering 2] failingOp
// // [throwing 2] failingOp [0.0s]: connection timeout
//
// Example with nested operations:
//
// fetchOrders := func(userID int) ReaderIOResult[[]Order] {
// return Of([]Order{{ID: 1}})
// }
//
// pipeline := F.Pipe3(
// fetchUser(123),
// LogEntryExit[User]("fetchUser"),
// Chain(func(user User) ReaderIOResult[[]Order] {
// return fetchOrders(user.ID)
// }),
// LogEntryExit[[]Order]("fetchOrders"),
// )
//
// result := pipeline(context.Background())()
// // Logs:
// // [entering 3] fetchUser
// // [exiting 3] fetchUser [0.1s]
// // [entering 4] fetchOrders
// // [exiting 4] fetchOrders [0.2s]
//
// Example with concurrent operations:
//
// // Multiple operations can run concurrently, each with unique IDs
// op1 := LogEntryExit[Data]("operation1")(fetchData(1))
// op2 := LogEntryExit[Data]("operation2")(fetchData(2))
//
// go op1(context.Background())()
// go op2(context.Background())()
// // Logs (order may vary):
// // [entering 5] operation1
// // [entering 6] operation2
// // [exiting 5] operation1 [0.1s]
// // [exiting 6] operation2 [0.2s]
// // The IDs allow correlation even when logs are interleaved
//
// Use Cases:
// - Debugging: Track execution flow through complex ReaderIOResult chains with correlation IDs
// - Performance monitoring: Identify slow operations with timing information
// - Production logging: Monitor critical operations with unique identifiers
// - Concurrent operations: Correlate logs from multiple concurrent operations
// - Nested operations: Track parent-child relationships in operation hierarchies
// - Troubleshooting: Quickly identify where errors occur and correlate with entry logs
//
//go:inline
func LogEntryExit[A any](name string) Operator[A, A] {
return LogEntryExitWithCallback[A](slog.LevelInfo, logging.GetLoggerFromContext, name)
}
func curriedLog(
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
message string) func(slog.Attr) func(context.Context) func() struct{} {
return F.Curry2(func(a slog.Attr, ctx context.Context) func() struct{} {
logger := cb(ctx)
return func() struct{} {
logger.LogAttrs(ctx, logLevel, message, a)
return struct{}{}
}
})
}
// SLogWithCallback creates a Kleisli arrow that logs a Result value (success or error) with a custom logger and log level.
//
// This function logs both successful values and errors, making it useful for debugging and monitoring
// Result values as they flow through a computation. SLogWithCallback logs the Result regardless of
// whether it contains a value or an error.
//
// The logged output includes:
// - For success: The message with the value as a structured "value" attribute
// - For error: The message with the error as a structured "error" attribute
//
// The Result is passed through unchanged after logging.
//
// Type Parameters:
// - A: The success type of the Result
//
// Parameters:
// - logLevel: The slog.Level to use for logging (e.g., slog.LevelInfo, slog.LevelDebug)
// - cb: Callback function to retrieve the *slog.Logger from the context
// - message: A descriptive message to include in the log entry
//
// Returns:
// - A Kleisli arrow that logs the Result (value or error) and returns it unchanged
//
// Example with custom log level:
//
// debugLog := SLogWithCallback[User](
// slog.LevelDebug,
// logging.GetLoggerFromContext,
// "User result",
// )
//
// pipeline := F.Pipe2(
// fetchUser(123),
// Chain(debugLog),
// Map(func(u User) string { return u.Name }),
// )
//
// Example with custom logger:
//
// type loggerKey int
// const myLoggerKey loggerKey = 0
//
// getMyLogger := func(ctx context.Context) *slog.Logger {
// if logger := ctx.Value(myLoggerKey); logger != nil {
// return logger.(*slog.Logger)
// }
// return slog.Default()
// }
//
// customLog := SLogWithCallback[Data](
// slog.LevelWarn,
// getMyLogger,
// "Data processing result",
// )
//
// Use Cases:
// - Debugging: Log both successful and failed Results in a pipeline
// - Error tracking: Monitor error occurrences with custom log levels
// - Custom logging: Use application-specific loggers and log levels
// - Conditional logging: Enable/disable logging based on logger configuration
func SLogWithCallback[A any](
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
message string) Kleisli[Result[A], A] {
return F.Pipe1(
F.Flow2(
// create the attribute to log depending on the condition
result.ToSLogAttr[A](),
// create an `IO` that logs the attribute
curriedLog(logLevel, cb, message),
),
// after logging, return the original Result unchanged
reader.Chain(reader.Sequence(readerio.MapTo[struct{}, Result[A]])),
)
}
// SLog creates a Kleisli arrow that logs a Result value (success or error) with a message.
//
// This function logs both successful values and errors at Info level using the logger from the context.
// It's a convenience wrapper around SLogWithCallback with standard settings.
//
// The logged output includes:
// - For success: The message with the value as a structured "value" attribute
// - For error: The message with the error as a structured "error" attribute
//
// The Result is passed through unchanged after logging, making this function transparent in the
// computation pipeline.
//
// Type Parameters:
// - A: The success type of the Result
//
// Parameters:
// - message: A descriptive message to include in the log entry
//
// Returns:
// - A Kleisli arrow that logs the Result (value or error) and returns it unchanged
//
// Example with successful Result:
//
// pipeline := F.Pipe2(
// fetchUser(123),
// Chain(SLog[User]("Fetched user")),
// Map(func(u User) string { return u.Name }),
// )
//
// result := pipeline(context.Background())()
// // If successful, logs: "Fetched user" value={ID:123 Name:"Alice"}
// // If error, logs: "Fetched user" error="user not found"
//
// Example in error handling pipeline:
//
// pipeline := F.Pipe3(
// fetchData(id),
// Chain(SLog[Data]("Data fetched")),
// Chain(validateData),
// Chain(SLog[Data]("Data validated")),
// Chain(processData),
// )
//
// // Logs each step, including errors:
// // "Data fetched" value={...} or error="..."
// // "Data validated" value={...} or error="..."
//
// Use Cases:
// - Debugging: Track both successful and failed Results in a pipeline
// - Error monitoring: Log errors as they occur in the computation
// - Flow tracking: See the progression of Results through a pipeline
// - Troubleshooting: Identify where errors are introduced or propagated
//
// Note: This function logs the Result itself (which may contain an error), not just successful values.
// TapSLog provides the same logging as an Operator that wraps an entire ReaderIOResult computation.
//
//go:inline
func SLog[A any](message string) Kleisli[Result[A], A] {
return SLogWithCallback[A](slog.LevelInfo, logging.GetLoggerFromContext, message)
}
// TapSLog creates an operator that logs the Result of a computation (value or error) with a message and passes it through unchanged.
//
// This function is useful for debugging and monitoring values as they flow through a ReaderIOResult
// computation chain. It is the operator form of SLog: where SLog takes a Result directly, TapSLog
// wraps a whole ReaderIOResult computation and logs whatever Result it produces, success or error,
// before propagating it unchanged.
//
// The logged output includes:
// - The provided message
// - The value being passed through (as a structured "value" attribute)
//
// Type Parameters:
// - A: The type of the value to log and pass through
//
// Parameters:
// - message: A descriptive message to include in the log entry
//
// Returns:
// - An Operator that logs the Result of the computation and returns it unchanged
//
// Example with simple value logging:
//
// fetchUser := func(id int) ReaderIOResult[User] {
// return Of(User{ID: id, Name: "Alice"})
// }
//
// pipeline := F.Pipe2(
// fetchUser(123),
// TapSLog[User]("Fetched user"),
// Map(func(u User) string { return u.Name }),
// )
//
// result := pipeline(context.Background())()
// // Logs: "Fetched user" value={ID:123 Name:"Alice"}
// // Returns: result.Of("Alice")
//
// Example in a processing pipeline:
//
// processOrder := F.Pipe4(
// fetchOrder(orderId),
// TapSLog[Order]("Order fetched"),
// Chain(validateOrder),
// TapSLog[Order]("Order validated"),
// Chain(processPayment),
// TapSLog[Payment]("Payment processed"),
// )
//
// result := processOrder(context.Background())()
// // Logs each successful step with the intermediate values
// // If any step fails, the following TapSLog calls log the propagated error instead
//
// Example with error handling:
//
// pipeline := F.Pipe3(
// fetchData(id),
// TapSLog[Data]("Data fetched"),
// Chain(func(d Data) ReaderIOResult[Result] {
// if d.IsValid() {
// return Of(processData(d))
// }
// return Left[Result](errors.New("invalid data"))
// }),
// TapSLog[Result]("Data processed"),
// )
//
// // If fetchData succeeds: logs "Data fetched" with the data
// // If processing succeeds: logs "Data processed" with the result
// // If processing fails: "Data processed" is logged with the error, which then propagates
//
// Use Cases:
// - Debugging: Inspect intermediate successful values in a computation pipeline
// - Monitoring: Track successful data flow through complex operations
// - Troubleshooting: Identify where successful computations stop (last logged value before error)
// - Auditing: Log important successful values for compliance or security
// - Development: Understand data transformations during development
//
// Note: Because it delegates to SLog, this function logs errors as well as successful values; the
// error is then propagated unchanged. Use SLog directly when you already have a Result value rather
// than a ReaderIOResult computation.
//
//go:inline
func TapSLog[A any](message string) Operator[A, A] {
return readerio.ChainFirst(SLog[A](message))
}

View File

@@ -0,0 +1,662 @@
package readerioresult
import (
"bytes"
"context"
"errors"
"log/slog"
"strconv"
"strings"
"testing"
"time"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/logging"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/result"
S "github.com/IBM/fp-go/v2/string"
"github.com/stretchr/testify/assert"
)
// TestLoggingContext tests basic nested logging with correlation IDs
func TestLoggingContext(t *testing.T) {
data := F.Pipe2(
Of("Sample"),
LogEntryExit[string]("TestLoggingContext1"),
LogEntryExit[string]("TestLoggingContext2"),
)
assert.Equal(t, result.Of("Sample"), data(context.Background())())
}
// TestLogEntryExitSuccess tests successful operation logging
func TestLogEntryExitSuccess(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
operation := F.Pipe1(
Of("success value"),
LogEntryExit[string]("TestOperation"),
)
res := operation(context.Background())()
assert.Equal(t, result.Of("success value"), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "[entering]")
assert.Contains(t, logOutput, "[exiting ]")
assert.Contains(t, logOutput, "TestOperation")
assert.Contains(t, logOutput, "ID=")
assert.Contains(t, logOutput, "duration=")
}
// TestLogEntryExitError tests error operation logging
func TestLogEntryExitError(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
testErr := errors.New("test error")
operation := F.Pipe1(
Left[string](testErr),
LogEntryExit[string]("FailingOperation"),
)
res := operation(context.Background())()
assert.True(t, result.IsLeft(res))
logOutput := buf.String()
assert.Contains(t, logOutput, "[entering]")
assert.Contains(t, logOutput, "[throwing]")
assert.Contains(t, logOutput, "FailingOperation")
assert.Contains(t, logOutput, "test error")
assert.Contains(t, logOutput, "ID=")
assert.Contains(t, logOutput, "duration=")
}
// TestLogEntryExitNested tests nested operations with different IDs
func TestLogEntryExitNested(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
innerOp := F.Pipe1(
Of("inner"),
LogEntryExit[string]("InnerOp"),
)
outerOp := F.Pipe2(
Of("outer"),
LogEntryExit[string]("OuterOp"),
Chain(func(s string) ReaderIOResult[string] {
return innerOp
}),
)
res := outerOp(context.Background())()
assert.True(t, result.IsRight(res))
logOutput := buf.String()
// Should have two different IDs
assert.Contains(t, logOutput, "OuterOp")
assert.Contains(t, logOutput, "InnerOp")
// Count entering and exiting logs
enterCount := strings.Count(logOutput, "[entering]")
exitCount := strings.Count(logOutput, "[exiting ]")
assert.Equal(t, 2, enterCount, "Should have 2 entering logs")
assert.Equal(t, 2, exitCount, "Should have 2 exiting logs")
}
// TestLogEntryExitWithCallback tests custom log level and callback
func TestLogEntryExitWithCallback(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelDebug,
}))
customCallback := func(ctx context.Context) *slog.Logger {
return logger
}
operation := F.Pipe1(
Of(42),
LogEntryExitWithCallback[int](slog.LevelDebug, customCallback, "DebugOperation"),
)
res := operation(context.Background())()
assert.Equal(t, result.Of(42), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "[entering]")
assert.Contains(t, logOutput, "[exiting ]")
assert.Contains(t, logOutput, "DebugOperation")
assert.Contains(t, logOutput, "level=DEBUG")
}
// TestLogEntryExitDisabled tests that logging can be disabled
func TestLogEntryExitDisabled(t *testing.T) {
var buf bytes.Buffer
// Create logger with level that disables info logs
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelError, // Only log errors
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
operation := F.Pipe1(
Of("value"),
LogEntryExit[string]("DisabledOperation"),
)
res := operation(context.Background())()
assert.True(t, result.IsRight(res))
// Should have no logs since level is ERROR
logOutput := buf.String()
assert.Empty(t, logOutput, "Should have no logs when logging is disabled")
}
// TestLogEntryExitF tests custom entry/exit callbacks
func TestLogEntryExitF(t *testing.T) {
var entryCount, exitCount int
onEntry := func(ctx context.Context) IO[context.Context] {
return func() context.Context {
entryCount++
return ctx
}
}
onExit := func(res Result[string]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
return func() any {
exitCount++
return nil
}
}
}
operation := F.Pipe1(
Of("test"),
LogEntryExitF(onEntry, onExit),
)
res := operation(context.Background())()
assert.True(t, result.IsRight(res))
assert.Equal(t, 1, entryCount, "Entry callback should be called once")
assert.Equal(t, 1, exitCount, "Exit callback should be called once")
}
// TestLogEntryExitFWithError tests custom callbacks with error
func TestLogEntryExitFWithError(t *testing.T) {
var entryCount, exitCount int
var capturedError error
onEntry := func(ctx context.Context) IO[context.Context] {
return func() context.Context {
entryCount++
return ctx
}
}
onExit := func(res Result[string]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
return func() any {
exitCount++
if result.IsLeft(res) {
_, capturedError = result.Unwrap(res)
}
return nil
}
}
}
testErr := errors.New("custom error")
operation := F.Pipe1(
Left[string](testErr),
LogEntryExitF(onEntry, onExit),
)
res := operation(context.Background())()
assert.True(t, result.IsLeft(res))
assert.Equal(t, 1, entryCount, "Entry callback should be called once")
assert.Equal(t, 1, exitCount, "Exit callback should be called once")
assert.Equal(t, testErr, capturedError, "Should capture the error")
}
// TestLoggingIDUniqueness tests that logging IDs are unique
func TestLoggingIDUniqueness(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
// Run multiple operations
for i := range 5 {
op := F.Pipe1(
Of(i),
LogEntryExit[int]("Operation"),
)
op(context.Background())()
}
logOutput := buf.String()
// Extract all IDs and verify they're unique
lines := strings.Split(logOutput, "\n")
ids := make(map[string]bool)
for _, line := range lines {
if strings.Contains(line, "ID=") {
// Extract ID value
parts := strings.Split(line, "ID=")
if len(parts) > 1 {
idPart := strings.Fields(parts[1])[0]
ids[idPart] = true
}
}
}
// Should have 5 unique IDs (one per operation)
assert.GreaterOrEqual(t, len(ids), 5, "Should have at least 5 unique IDs")
}
// TestLogEntryExitWithContextLogger tests using logger from context
func TestLogEntryExitWithContextLogger(t *testing.T) {
var buf bytes.Buffer
contextLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(context.Background())
operation := F.Pipe1(
Of("context value"),
LogEntryExit[string]("ContextOperation"),
)
res := operation(ctx)()
assert.True(t, result.IsRight(res))
logOutput := buf.String()
assert.Contains(t, logOutput, "[entering]")
assert.Contains(t, logOutput, "[exiting ]")
assert.Contains(t, logOutput, "ContextOperation")
}
// TestLogEntryExitTiming tests that duration is captured
func TestLogEntryExitTiming(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
// Operation with delay
slowOp := func(ctx context.Context) IOResult[string] {
return func() Result[string] {
time.Sleep(10 * time.Millisecond)
return result.Of("done")
}
}
operation := F.Pipe1(
slowOp,
LogEntryExit[string]("SlowOperation"),
)
res := operation(context.Background())()
assert.True(t, result.IsRight(res))
logOutput := buf.String()
assert.Contains(t, logOutput, "duration=")
// Verify duration is present in exit log
lines := strings.Split(logOutput, "\n")
var foundDuration bool
for _, line := range lines {
if strings.Contains(line, "[exiting ]") && strings.Contains(line, "duration=") {
foundDuration = true
break
}
}
assert.True(t, foundDuration, "Exit log should contain duration")
}
// TestLogEntryExitChainedOperations tests complex chained operations
func TestLogEntryExitChainedOperations(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
step1 := F.Pipe1(
Of(1),
LogEntryExit[int]("Step1"),
)
step2 := F.Flow3(
N.Mul(2),
Of,
LogEntryExit[int]("Step2"),
)
step3 := F.Flow3(
strconv.Itoa,
Of,
LogEntryExit[string]("Step3"),
)
pipeline := F.Pipe1(
step1,
Chain(F.Flow2(
step2,
Chain(step3),
)),
)
res := pipeline(context.Background())()
assert.Equal(t, result.Of("2"), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "Step1")
assert.Contains(t, logOutput, "Step2")
assert.Contains(t, logOutput, "Step3")
// Verify all steps completed
assert.Equal(t, 3, strings.Count(logOutput, "[entering]"))
assert.Equal(t, 3, strings.Count(logOutput, "[exiting ]"))
}
// TestTapSLog tests basic TapSLog functionality
func TestTapSLog(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
operation := F.Pipe2(
Of(42),
TapSLog[int]("Processing value"),
Map(N.Mul(2)),
)
res := operation(context.Background())()
assert.Equal(t, result.Of(84), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "Processing value")
assert.Contains(t, logOutput, "value=42")
}
// TestTapSLogInPipeline tests TapSLog in a multi-step pipeline
func TestTapSLogInPipeline(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
step1 := F.Pipe2(
Of("hello"),
TapSLog[string]("Step 1: Initial value"),
Map(func(s string) string { return s + " world" }),
)
step2 := F.Pipe2(
step1,
TapSLog[string]("Step 2: After concatenation"),
Map(S.Size),
)
pipeline := F.Pipe1(
step2,
TapSLog[int]("Step 3: Final length"),
)
res := pipeline(context.Background())()
assert.Equal(t, result.Of(11), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "Step 1: Initial value")
assert.Contains(t, logOutput, "value=hello")
assert.Contains(t, logOutput, "Step 2: After concatenation")
assert.Contains(t, logOutput, `value="hello world"`)
assert.Contains(t, logOutput, "Step 3: Final length")
assert.Contains(t, logOutput, "value=11")
}
// TestTapSLogWithError tests that TapSLog logs errors (via SLog)
func TestTapSLogWithError(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
testErr := errors.New("computation failed")
pipeline := F.Pipe2(
Left[int](testErr),
TapSLog[int]("Error logged"),
Map(N.Mul(2)),
)
res := pipeline(context.Background())()
assert.True(t, result.IsLeft(res))
logOutput := buf.String()
// TapSLog uses SLog internally, which logs both successes and errors
assert.Contains(t, logOutput, "Error logged")
assert.Contains(t, logOutput, "error")
assert.Contains(t, logOutput, "computation failed")
}
// TestTapSLogWithStruct tests TapSLog with structured data
func TestTapSLogWithStruct(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
type User struct {
ID int
Name string
}
user := User{ID: 123, Name: "Alice"}
operation := F.Pipe2(
Of(user),
TapSLog[User]("User data"),
Map(func(u User) string { return u.Name }),
)
res := operation(context.Background())()
assert.Equal(t, result.Of("Alice"), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "User data")
assert.Contains(t, logOutput, "ID:123")
assert.Contains(t, logOutput, "Name:Alice")
}
// TestTapSLogDisabled tests that TapSLog respects logger level
func TestTapSLogDisabled(t *testing.T) {
var buf bytes.Buffer
// Create logger with level that disables info logs
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelError, // Only log errors
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
operation := F.Pipe2(
Of(42),
TapSLog[int]("This should not be logged"),
Map(N.Mul(2)),
)
res := operation(context.Background())()
assert.Equal(t, result.Of(84), res)
// Should have no logs since level is ERROR
logOutput := buf.String()
assert.Empty(t, logOutput, "Should have no logs when logging is disabled")
}
// TestTapSLogWithContextLogger tests TapSLog using logger from context
func TestTapSLogWithContextLogger(t *testing.T) {
var buf bytes.Buffer
contextLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(context.Background())
operation := F.Pipe2(
Of("test value"),
TapSLog[string]("Context logger test"),
Map(S.Size),
)
res := operation(ctx)()
assert.Equal(t, result.Of(10), res)
logOutput := buf.String()
assert.Contains(t, logOutput, "Context logger test")
assert.Contains(t, logOutput, `value="test value"`)
}
// TestSLogLogsSuccessValue tests that SLog logs successful Result values
func TestSLogLogsSuccessValue(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
// Create a Result and log it
res1 := result.Of(42)
logged := SLog[int]("Result value")(res1)(ctx)()
assert.Equal(t, result.Of(42), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Result value")
assert.Contains(t, logOutput, "value=42")
}
// TestSLogLogsErrorValue tests that SLog logs error Result values
func TestSLogLogsErrorValue(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
testErr := errors.New("test error")
// Create an error Result and log it
res1 := result.Left[int](testErr)
logged := SLog[int]("Result value")(res1)(ctx)()
assert.True(t, result.IsLeft(logged))
logOutput := buf.String()
assert.Contains(t, logOutput, "Result value")
assert.Contains(t, logOutput, "error")
assert.Contains(t, logOutput, "test error")
}
// TestSLogWithCallbackCustomLevel tests SLogWithCallback with custom log level
func TestSLogWithCallbackCustomLevel(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelDebug,
}))
customCallback := func(ctx context.Context) *slog.Logger {
return logger
}
ctx := context.Background()
// Create a Result and log it with custom callback
res1 := result.Of(42)
logged := SLogWithCallback[int](slog.LevelDebug, customCallback, "Debug result")(res1)(ctx)()
assert.Equal(t, result.Of(42), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Debug result")
assert.Contains(t, logOutput, "value=42")
assert.Contains(t, logOutput, "level=DEBUG")
}
// TestSLogWithCallbackLogsError tests SLogWithCallback logs errors
func TestSLogWithCallbackLogsError(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelWarn,
}))
customCallback := func(ctx context.Context) *slog.Logger {
return logger
}
ctx := context.Background()
testErr := errors.New("warning error")
// Create an error Result and log it with custom callback
res1 := result.Left[int](testErr)
logged := SLogWithCallback[int](slog.LevelWarn, customCallback, "Warning result")(res1)(ctx)()
assert.True(t, result.IsLeft(logged))
logOutput := buf.String()
assert.Contains(t, logOutput, "Warning result")
assert.Contains(t, logOutput, "error")
assert.Contains(t, logOutput, "warning error")
assert.Contains(t, logOutput, "level=WARN")
}

View File

@@ -19,6 +19,7 @@ import (
"context"
"time"
+"github.com/IBM/fp-go/v2/context/readerio"
"github.com/IBM/fp-go/v2/context/readerresult"
"github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/errors"
@@ -26,10 +27,11 @@ import (
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioeither"
"github.com/IBM/fp-go/v2/ioresult"
+"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
-"github.com/IBM/fp-go/v2/readerio"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/readeroption"
+"github.com/IBM/fp-go/v2/result"
)
const (
@@ -150,7 +152,7 @@ func MapTo[A, B any](b B) Operator[A, B] {
//
//go:inline
func MonadChain[A, B any](ma ReaderIOResult[A], f Kleisli[A, B]) ReaderIOResult[B] {
-return RIOR.MonadChain(ma, f)
+return RIOR.MonadChain(ma, WithContextK(f))
}
// Chain sequences two [ReaderIOResult] computations, where the second depends on the result of the first.
@@ -163,7 +165,7 @@ func MonadChain[A, B any](ma ReaderIOResult[A], f Kleisli[A, B]) ReaderIOResult[
//
//go:inline
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B] {
-return RIOR.Chain(f)
+return RIOR.Chain(WithContextK(f))
}
// MonadChainFirst sequences two [ReaderIOResult] computations but returns the result of the first.
@@ -177,7 +179,12 @@ func Chain[A, B any](f Kleisli[A, B]) Operator[A, B] {
//
//go:inline
func MonadChainFirst[A, B any](ma ReaderIOResult[A], f Kleisli[A, B]) ReaderIOResult[A] {
-return RIOR.MonadChainFirst(ma, f)
+return RIOR.MonadChainFirst(ma, WithContextK(f))
+}
+//go:inline
+func MonadTap[A, B any](ma ReaderIOResult[A], f Kleisli[A, B]) ReaderIOResult[A] {
+return RIOR.MonadTap(ma, WithContextK(f))
}
// ChainFirst sequences two [ReaderIOResult] computations but returns the result of the first.
@@ -190,7 +197,12 @@ func MonadChainFirst[A, B any](ma ReaderIOResult[A], f Kleisli[A, B]) ReaderIORe
//
//go:inline
func ChainFirst[A, B any](f Kleisli[A, B]) Operator[A, A] {
-return RIOR.ChainFirst(f)
+return RIOR.ChainFirst(WithContextK(f))
+}
+//go:inline
+func Tap[A, B any](f Kleisli[A, B]) Operator[A, A] {
+return RIOR.Tap(WithContextK(f))
}
// Of creates a [ReaderIOResult] that always succeeds with the given value.
@@ -233,14 +245,14 @@ func MonadApPar[B, A any](fab ReaderIOResult[func(A) B], fa ReaderIOResult[A]) R
return func(ctx context.Context) IOResult[B] {
// quick check for cancellation
-if err := context.Cause(ctx); err != nil {
-return ioeither.Left[B](err)
+if ctx.Err() != nil {
+return ioeither.Left[B](context.Cause(ctx))
}
return func() Result[B] {
// quick check for cancellation
-if err := context.Cause(ctx); err != nil {
-return either.Left[B](err)
+if ctx.Err() != nil {
+return either.Left[B](context.Cause(ctx))
}
// create sub-contexts for fa and fab, so they can cancel one other
@@ -372,7 +384,7 @@ func Ask() ReaderIOResult[context.Context] {
// Returns a new ReaderIOResult with the chained computation.
//
//go:inline
-func MonadChainEitherK[A, B any](ma ReaderIOResult[A], f func(A) Either[B]) ReaderIOResult[B] {
+func MonadChainEitherK[A, B any](ma ReaderIOResult[A], f either.Kleisli[error, A, B]) ReaderIOResult[B] {
return RIOR.MonadChainEitherK(ma, f)
}
@@ -385,7 +397,12 @@ func MonadChainEitherK[A, B any](ma ReaderIOResult[A], f func(A) Either[B]) Read
// Returns a function that chains the Either-returning function.
//
//go:inline
-func ChainEitherK[A, B any](f func(A) Either[B]) Operator[A, B] {
+func ChainEitherK[A, B any](f either.Kleisli[error, A, B]) Operator[A, B] {
+return RIOR.ChainEitherK[context.Context](f)
+}
+//go:inline
+func ChainResultK[A, B any](f either.Kleisli[error, A, B]) Operator[A, B] {
return RIOR.ChainEitherK[context.Context](f)
}
@@ -399,10 +416,15 @@ func ChainEitherK[A, B any](f func(A) Either[B]) Operator[A, B] {
// Returns a ReaderIOResult with the original value if both computations succeed.
//
//go:inline
-func MonadChainFirstEitherK[A, B any](ma ReaderIOResult[A], f func(A) Either[B]) ReaderIOResult[A] {
+func MonadChainFirstEitherK[A, B any](ma ReaderIOResult[A], f either.Kleisli[error, A, B]) ReaderIOResult[A] {
return RIOR.MonadChainFirstEitherK(ma, f)
}
+//go:inline
+func MonadTapEitherK[A, B any](ma ReaderIOResult[A], f either.Kleisli[error, A, B]) ReaderIOResult[A] {
+return RIOR.MonadTapEitherK(ma, f)
+}
// ChainFirstEitherK chains a function that returns an [Either] but keeps the original value.
// This is the curried version of [MonadChainFirstEitherK].
//
@@ -412,10 +434,15 @@ func MonadChainFirstEitherK[A, B any](ma ReaderIOResult[A], f func(A) Either[B])
// Returns a function that chains the Either-returning function.
//
//go:inline
-func ChainFirstEitherK[A, B any](f func(A) Either[B]) Operator[A, A] {
+func ChainFirstEitherK[A, B any](f either.Kleisli[error, A, B]) Operator[A, A] {
return RIOR.ChainFirstEitherK[context.Context](f)
}
+//go:inline
+func TapEitherK[A, B any](f either.Kleisli[error, A, B]) Operator[A, A] {
+return RIOR.TapEitherK[context.Context](f)
+}
// ChainOptionK chains a function that returns an [Option] into a [ReaderIOResult] computation.
// If the Option is None, the provided error function is called.
//
@@ -425,7 +452,7 @@ func ChainFirstEitherK[A, B any](f func(A) Either[B]) Operator[A, A] {
// Returns a function that chains Option-returning functions into ReaderIOResult.
//
//go:inline
-func ChainOptionK[A, B any](onNone func() error) func(func(A) Option[B]) Operator[A, B] {
+func ChainOptionK[A, B any](onNone func() error) func(option.Kleisli[A, B]) Operator[A, B] {
return RIOR.ChainOptionK[context.Context, A, B](onNone)
}
@@ -507,7 +534,7 @@ func Never[A any]() ReaderIOResult[A] {
// Returns a new ReaderIOResult with the chained IO computation.
//
//go:inline
-func MonadChainIOK[A, B any](ma ReaderIOResult[A], f func(A) IO[B]) ReaderIOResult[B] {
+func MonadChainIOK[A, B any](ma ReaderIOResult[A], f io.Kleisli[A, B]) ReaderIOResult[B] {
return RIOR.MonadChainIOK(ma, f)
}
@@ -520,7 +547,7 @@ func MonadChainIOK[A, B any](ma ReaderIOResult[A], f func(A) IO[B]) ReaderIOResu
// Returns a function that chains the IO-returning function.
//
//go:inline
-func ChainIOK[A, B any](f func(A) IO[B]) Operator[A, B] {
+func ChainIOK[A, B any](f io.Kleisli[A, B]) Operator[A, B] {
return RIOR.ChainIOK[context.Context](f)
}
@@ -534,10 +561,15 @@ func ChainIOK[A, B any](f func(A) IO[B]) Operator[A, B] {
// Returns a ReaderIOResult with the original value after executing the IO.
//
//go:inline
-func MonadChainFirstIOK[A, B any](ma ReaderIOResult[A], f func(A) IO[B]) ReaderIOResult[A] {
+func MonadChainFirstIOK[A, B any](ma ReaderIOResult[A], f io.Kleisli[A, B]) ReaderIOResult[A] {
return RIOR.MonadChainFirstIOK(ma, f)
}
+//go:inline
+func MonadTapIOK[A, B any](ma ReaderIOResult[A], f io.Kleisli[A, B]) ReaderIOResult[A] {
+return RIOR.MonadTapIOK(ma, f)
+}
// ChainFirstIOK chains a function that returns an [IO] but keeps the original value.
// This is the curried version of [MonadChainFirstIOK].
//
@@ -547,10 +579,15 @@ func MonadChainFirstIOK[A, B any](ma ReaderIOResult[A], f func(A) IO[B]) ReaderI
// Returns a function that chains the IO-returning function.
//
//go:inline
-func ChainFirstIOK[A, B any](f func(A) IO[B]) Operator[A, A] {
+func ChainFirstIOK[A, B any](f io.Kleisli[A, B]) Operator[A, A] {
return RIOR.ChainFirstIOK[context.Context](f)
}
+//go:inline
+func TapIOK[A, B any](f io.Kleisli[A, B]) Operator[A, A] {
+return RIOR.TapIOK[context.Context](f)
+}
// ChainIOEitherK chains a function that returns an [IOResult] into a [ReaderIOResult] computation.
// This is useful for integrating IOResult-returning functions into ReaderIOResult workflows.
//
@@ -560,7 +597,7 @@ func ChainFirstIOK[A, B any](f func(A) IO[B]) Operator[A, A] {
// Returns a function that chains the IOResult-returning function.
//
//go:inline
-func ChainIOEitherK[A, B any](f func(A) IOResult[B]) Operator[A, B] {
+func ChainIOEitherK[A, B any](f ioresult.Kleisli[A, B]) Operator[A, B] {
return RIOR.ChainIOEitherK[context.Context](f)
}
@@ -628,7 +665,7 @@ func Defer[A any](gen Lazy[ReaderIOResult[A]]) ReaderIOResult[A] {
//
//go:inline
func TryCatch[A any](f func(context.Context) func() (A, error)) ReaderIOResult[A] {
-return RIOR.TryCatch(f, errors.IdentityError)
+return RIOR.TryCatch(f, errors.Identity)
}
// MonadAlt provides an alternative [ReaderIOResult] if the first one fails.
@@ -723,7 +760,7 @@ func Flap[B, A any](a A) Operator[func(A) B, B] {
//
//go:inline
func Fold[A, B any](onLeft Kleisli[error, B], onRight Kleisli[A, B]) Operator[A, B] {
-return RIOR.Fold(onLeft, onRight)
+return RIOR.Fold(function.Flow2(onLeft, WithContext), function.Flow2(onRight, WithContext))
}
// GetOrElse extracts the value from a [ReaderIOResult], providing a default via a function if it fails.
@@ -735,7 +772,7 @@ func Fold[A, B any](onLeft Kleisli[error, B], onRight Kleisli[A, B]) Operator[A,
// Returns a function that converts a ReaderIOResult to a ReaderIO.
//
//go:inline
-func GetOrElse[A any](onLeft func(error) ReaderIO[A]) func(ReaderIOResult[A]) ReaderIO[A] {
+func GetOrElse[A any](onLeft readerio.Kleisli[error, A]) func(ReaderIOResult[A]) ReaderIO[A] {
return RIOR.GetOrElse(onLeft)
}
@@ -782,11 +819,21 @@ func MonadChainFirstReaderK[A, B any](ma ReaderIOResult[A], f reader.Kleisli[con
return RIOR.MonadChainFirstReaderK(ma, f)
}
+//go:inline
+func MonadTapReaderK[A, B any](ma ReaderIOResult[A], f reader.Kleisli[context.Context, A, B]) ReaderIOResult[A] {
+return RIOR.MonadTapReaderK(ma, f)
+}
//go:inline
func ChainFirstReaderK[A, B any](f reader.Kleisli[context.Context, A, B]) Operator[A, A] {
return RIOR.ChainFirstReaderK(f)
}
+//go:inline
+func TapReaderK[A, B any](f reader.Kleisli[context.Context, A, B]) Operator[A, A] {
+return RIOR.TapReaderK(f)
+}
//go:inline
func MonadChainReaderResultK[A, B any](ma ReaderIOResult[A], f readerresult.Kleisli[A, B]) ReaderIOResult[B] {
return RIOR.MonadChainReaderResultK(ma, f)
@@ -802,31 +849,51 @@ func MonadChainFirstReaderResultK[A, B any](ma ReaderIOResult[A], f readerresult
return RIOR.MonadChainFirstReaderResultK(ma, f)
}
+//go:inline
+func MonadTapReaderResultK[A, B any](ma ReaderIOResult[A], f readerresult.Kleisli[A, B]) ReaderIOResult[A] {
+return RIOR.MonadTapReaderResultK(ma, f)
+}
//go:inline
func ChainFirstReaderResultK[A, B any](f readerresult.Kleisli[A, B]) Operator[A, A] {
return RIOR.ChainFirstReaderResultK(f)
}
//go:inline
-func MonadChainReaderIOK[A, B any](ma ReaderIOResult[A], f readerio.Kleisli[context.Context, A, B]) ReaderIOResult[B] {
+func TapReaderResultK[A, B any](f readerresult.Kleisli[A, B]) Operator[A, A] {
+return RIOR.TapReaderResultK(f)
+}
+//go:inline
+func MonadChainReaderIOK[A, B any](ma ReaderIOResult[A], f readerio.Kleisli[A, B]) ReaderIOResult[B] {
return RIOR.MonadChainReaderIOK(ma, f)
}
//go:inline
-func ChainReaderIOK[A, B any](f readerio.Kleisli[context.Context, A, B]) Operator[A, B] {
+func ChainReaderIOK[A, B any](f readerio.Kleisli[A, B]) Operator[A, B] {
return RIOR.ChainReaderIOK(f)
}
//go:inline
-func MonadChainFirstReaderIOK[A, B any](ma ReaderIOResult[A], f readerio.Kleisli[context.Context, A, B]) ReaderIOResult[A] {
+func MonadChainFirstReaderIOK[A, B any](ma ReaderIOResult[A], f readerio.Kleisli[A, B]) ReaderIOResult[A] {
return RIOR.MonadChainFirstReaderIOK(ma, f)
}
//go:inline
-func ChainFirstReaderIOK[A, B any](f readerio.Kleisli[context.Context, A, B]) Operator[A, A] {
+func MonadTapReaderIOK[A, B any](ma ReaderIOResult[A], f readerio.Kleisli[A, B]) ReaderIOResult[A] {
+return RIOR.MonadTapReaderIOK(ma, f)
+}
+//go:inline
+func ChainFirstReaderIOK[A, B any](f readerio.Kleisli[A, B]) Operator[A, A] {
return RIOR.ChainFirstReaderIOK(f)
}
+//go:inline
+func TapReaderIOK[A, B any](f readerio.Kleisli[A, B]) Operator[A, A] {
+return RIOR.TapReaderIOK(f)
+}
//go:inline
func ChainReaderOptionK[A, B any](onNone func() error) func(readeroption.Kleisli[context.Context, A, B]) Operator[A, B] {
return RIOR.ChainReaderOptionK[context.Context, A, B](onNone)
@@ -837,7 +904,266 @@ func ChainFirstReaderOptionK[A, B any](onNone func() error) func(readeroption.Kl
return RIOR.ChainFirstReaderOptionK[context.Context, A, B](onNone)
}
+//go:inline
+func TapReaderOptionK[A, B any](onNone func() error) func(readeroption.Kleisli[context.Context, A, B]) Operator[A, A] {
+return RIOR.TapReaderOptionK[context.Context, A, B](onNone)
+}
//go:inline
func Read[A any](r context.Context) func(ReaderIOResult[A]) IOResult[A] {
return RIOR.Read[A](r)
}
// MonadChainLeft chains a computation on the left (error) side of a [ReaderIOResult].
// If the input is a Left value, it applies the function f to the error, allowing the computation to
// recover with a new value or to replace the error. If the input is a Right value, it passes through unchanged.
//
//go:inline
func MonadChainLeft[A any](fa ReaderIOResult[A], f Kleisli[error, A]) ReaderIOResult[A] {
return RIOR.MonadChainLeft(fa, WithContextK(f))
}
// ChainLeft is the curried version of [MonadChainLeft].
// It returns a function that chains a computation on the left (error) side of a [ReaderIOResult].
//
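// Example (illustrative sketch; Config, loadConfig, ErrNotFound, and defaultConfig are assumed
// application-level names, not part of this package):
//
// recoverNotFound := ChainLeft(func(err error) ReaderIOResult[Config] {
// if err == ErrNotFound {
// return Of(defaultConfig) // recover from a missing resource with a default
// }
// return Left[Config](err) // propagate any other error unchanged
// })
//
// result := F.Pipe1(
// loadConfig("app.yaml"),
// recoverNotFound,
// )
//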
//go:inline
func ChainLeft[A any](f Kleisli[error, A]) Operator[A, A] {
return RIOR.ChainLeft(WithContextK(f))
}
// MonadChainFirstLeft chains a computation on the left (error) side but always returns the original error.
// If the input is a Left value, it applies the function f to the error and executes the resulting computation,
// but always returns the original Left error regardless of what f returns (Left or Right).
// If the input is a Right value, it passes through unchanged without calling f.
//
// This is useful for side effects on errors (like logging or metrics) where you want to perform an action
// when an error occurs but always propagate the original error, ensuring the error path is preserved.
//
//go:inline
func MonadChainFirstLeft[A, B any](ma ReaderIOResult[A], f Kleisli[error, B]) ReaderIOResult[A] {
return RIOR.MonadChainFirstLeft(ma, WithContextK(f))
}
//go:inline
func MonadTapLeft[A, B any](ma ReaderIOResult[A], f Kleisli[error, B]) ReaderIOResult[A] {
return RIOR.MonadTapLeft(ma, WithContextK(f))
}
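For illustration (not part of the diff), a minimal sketch of MonadChainFirstLeft used to count failures: the side effect runs only on the error path, and callers still observe the original Left. failureCount and fetchConfig are placeholders; the errors and sync/atomic packages are assumed.

var failureCount int64

// fetchConfig is a placeholder computation that fails.
func fetchConfig() ReaderIOResult[string] {
	return Left[string](errors.New("config service unavailable"))
}

func fetchConfigCounted() ReaderIOResult[string] {
	return MonadChainFirstLeft(
		fetchConfig(),
		func(err error) ReaderIOResult[int64] {
			// Runs only when fetchConfig fails; the int64 it produces is discarded.
			return FromIO(func() int64 {
				return atomic.AddInt64(&failureCount, 1)
			})
		},
	)
}

// fetchConfigCounted()(ctx)() still yields the original Left error;
// failureCount records how often the error path was taken.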
// ChainFirstLeft is the curried version of [MonadChainFirstLeft].
// It returns a function that chains a computation on the left (error) side while always preserving the original error.
//
// This is particularly useful for adding error handling side effects (like logging, metrics, or notifications)
// in a functional pipeline. The original error is always returned regardless of what f returns (Left or Right),
// ensuring the error path is preserved.
//
//go:inline
func ChainFirstLeft[A, B any](f Kleisli[error, B]) Operator[A, A] {
return RIOR.ChainFirstLeft[A](WithContextK(f))
}
//go:inline
func TapLeft[A, B any](f Kleisli[error, B]) Operator[A, A] {
return RIOR.TapLeft[A](WithContextK(f))
}
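The curried forms slot directly into pipelines. Below is an illustrative sketch (not from this commit) that first logs a failure with TapLeft and then maps it to a domain error with ChainLeft; Report, fetchReport, and the log/slog-based logging are assumptions, not library API, and the errors, fmt, and F imports are presumed available.

type Report struct{ ID string }

// fetchReport is a placeholder computation that may fail.
func fetchReport(id string) ReaderIOResult[Report] {
	return Left[Report](errors.New("backend timeout"))
}

func loadReport(id string) ReaderIOResult[Report] {
	return F.Pipe2(
		fetchReport(id),
		// Log the failure; the original error keeps flowing downstream.
		TapLeft[Report](func(err error) ReaderIOResult[any] {
			return FromIO(func() any {
				slog.Error("loading report failed", "id", id, "err", err)
				return nil
			})
		}),
		// Then translate it into a domain-level error.
		ChainLeft(func(err error) ReaderIOResult[Report] {
			return Left[Report](fmt.Errorf("report %s unavailable: %w", id, err))
		}),
	)
}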
// Local transforms the context.Context environment before passing it to a ReaderIOResult computation.
//
// This is the Reader's local operation, which allows you to modify the environment
// for a specific computation without affecting the outer context. The transformation
// function receives the current context and returns a new context along with a
// cancel function. The cancel function is automatically called when the computation
// completes (via defer), ensuring proper cleanup of resources.
//
// The function checks for context cancellation before applying the transformation,
// returning an error immediately if the context is already cancelled.
//
// This is useful for:
// - Adding timeouts or deadlines to specific operations
// - Adding context values for nested computations
// - Creating isolated context scopes
// - Implementing context-based dependency injection
//
// Type Parameters:
// - A: The value type of the ReaderIOResult
//
// Parameters:
// - f: A function that transforms the context and returns a cancel function
//
// Returns:
// - An Operator that runs the computation with the transformed context
//
// Example:
//
// import F "github.com/IBM/fp-go/v2/function"
//
// // Add a custom value to the context
// type key int
// const userKey key = 0
//
// addUser := readerioresult.Local[string](func(ctx context.Context) (context.Context, context.CancelFunc) {
// newCtx := context.WithValue(ctx, userKey, "Alice")
// return newCtx, func() {} // No-op cancel
// })
//
// getUser := readerioresult.FromReader(func(ctx context.Context) string {
// if user := ctx.Value(userKey); user != nil {
// return user.(string)
// }
// return "unknown"
// })
//
// result := F.Pipe1(
// getUser,
// addUser,
// )
// res := result(context.Background())() // Returns Right("Alice")
//
// Timeout Example:
//
// // Add a 5-second timeout to a specific operation
// withTimeout := readerioresult.Local[Data](func(ctx context.Context) (context.Context, context.CancelFunc) {
// return context.WithTimeout(ctx, 5*time.Second)
// })
//
// result := F.Pipe1(
// fetchData,
// withTimeout,
// )
func Local[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
return func(rr ReaderIOResult[A]) ReaderIOResult[A] {
return func(ctx context.Context) IOResult[A] {
return func() Result[A] {
if ctx.Err() != nil {
return result.Left[A](context.Cause(ctx))
}
otherCtx, otherCancel := f(ctx)
defer otherCancel()
return rr(otherCtx)()
}
}
}
}
// WithTimeout adds a timeout to the context for a ReaderIOResult computation.
//
// This is a convenience wrapper around Local that uses context.WithTimeout.
// The computation must complete within the specified duration, or it will be
// cancelled. This is useful for ensuring operations don't run indefinitely
// and for implementing timeout-based error handling.
//
// The timeout is relative to when the ReaderIOResult is executed, not when
// WithTimeout is called. The cancel function is automatically called when
// the computation completes, ensuring proper cleanup. If the timeout expires,
// the computation will receive a context.DeadlineExceeded error.
//
// Type Parameters:
// - A: The value type of the ReaderIOResult
//
// Parameters:
// - timeout: The maximum duration for the computation
//
// Returns:
// - An Operator that runs the computation with a timeout
//
// Example:
//
// import (
// "time"
// F "github.com/IBM/fp-go/v2/function"
// )
//
// // Fetch data with a 5-second timeout
// fetchData := readerioresult.FromReader(func(ctx context.Context) Data {
// // Simulate slow operation
// select {
// case <-time.After(10 * time.Second):
// return Data{Value: "slow"}
// case <-ctx.Done():
// return Data{}
// }
// })
//
// result := F.Pipe1(
// fetchData,
// readerioresult.WithTimeout[Data](5*time.Second),
// )
// res := result(context.Background())() // Returns Left(context.DeadlineExceeded) after 5s
//
// Successful Example:
//
// quickFetch := readerioresult.Right(Data{Value: "quick"})
// result := F.Pipe1(
// quickFetch,
// readerioresult.WithTimeout[Data](5*time.Second),
// )
// res := result(context.Background())() // Returns Right(Data{Value: "quick"})
func WithTimeout[A any](timeout time.Duration) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(ctx, timeout)
})
}
// WithDeadline adds an absolute deadline to the context for a ReaderIOResult computation.
//
// This is a convenience wrapper around Local that uses context.WithDeadline.
// The computation must complete before the specified time, or it will be
// cancelled. This is useful for coordinating operations that must finish
// by a specific time, such as request deadlines or scheduled tasks.
//
// The deadline is an absolute time, unlike WithTimeout which uses a relative
// duration. The cancel function is automatically called when the computation
// completes, ensuring proper cleanup. If the deadline passes, the computation
// will receive a context.DeadlineExceeded error.
//
// Type Parameters:
// - A: The value type of the ReaderIOResult
//
// Parameters:
// - deadline: The absolute time by which the computation must complete
//
// Returns:
// - An Operator that runs the computation with a deadline
//
// Example:
//
// import (
// "time"
// F "github.com/IBM/fp-go/v2/function"
// )
//
// // Operation must complete by 3 PM
// deadline := time.Date(2024, 1, 1, 15, 0, 0, 0, time.UTC)
//
// fetchData := readerioresult.FromReader(func(ctx context.Context) Data {
// // Simulate operation
// select {
// case <-time.After(1 * time.Hour):
// return Data{Value: "done"}
// case <-ctx.Done():
// return Data{}
// }
// })
//
// result := F.Pipe1(
// fetchData,
// readerioresult.WithDeadline[Data](deadline),
// )
// res := result(context.Background())() // Returns Left(context.DeadlineExceeded) if past deadline
//
// Combining with Parent Context:
//
// // If parent context already has a deadline, the earlier one takes precedence
// parentCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(1*time.Hour))
// defer cancel()
//
// laterDeadline := time.Now().Add(2 * time.Hour)
// result := F.Pipe1(
// fetchData,
// readerioresult.WithDeadline[Data](laterDeadline),
// )
// res := result(parentCtx)() // Will use parent's 1-hour deadline
func WithDeadline[A any](deadline time.Time) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithDeadline(ctx, deadline)
})
}

View File

@@ -24,6 +24,7 @@ import (
E "github.com/IBM/fp-go/v2/either" E "github.com/IBM/fp-go/v2/either"
F "github.com/IBM/fp-go/v2/function" F "github.com/IBM/fp-go/v2/function"
IOE "github.com/IBM/fp-go/v2/ioeither" IOE "github.com/IBM/fp-go/v2/ioeither"
N "github.com/IBM/fp-go/v2/number"
) )
var ( var (
@@ -37,21 +38,21 @@ var (
// Benchmark core constructors // Benchmark core constructors
func BenchmarkLeft(b *testing.B) { func BenchmarkLeft(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Left[int](benchErr) benchRIOE = Left[int](benchErr)
} }
} }
func BenchmarkRight(b *testing.B) { func BenchmarkRight(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Right(42) benchRIOE = Right(42)
} }
} }
func BenchmarkOf(b *testing.B) { func BenchmarkOf(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Of(42) benchRIOE = Of(42)
} }
} }
@@ -60,7 +61,7 @@ func BenchmarkFromEither_Right(b *testing.B) {
either := E.Right[error](42) either := E.Right[error](42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = FromEither(either) benchRIOE = FromEither(either)
} }
} }
@@ -69,7 +70,7 @@ func BenchmarkFromEither_Left(b *testing.B) {
either := E.Left[int](benchErr) either := E.Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = FromEither(either) benchRIOE = FromEither(either)
} }
} }
@@ -77,7 +78,7 @@ func BenchmarkFromEither_Left(b *testing.B) {
func BenchmarkFromIO(b *testing.B) { func BenchmarkFromIO(b *testing.B) {
io := func() int { return 42 } io := func() int { return 42 }
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = FromIO(io) benchRIOE = FromIO(io)
} }
} }
@@ -85,7 +86,7 @@ func BenchmarkFromIO(b *testing.B) {
func BenchmarkFromIOEither_Right(b *testing.B) { func BenchmarkFromIOEither_Right(b *testing.B) {
ioe := IOE.Of[error](42) ioe := IOE.Of[error](42)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = FromIOEither(ioe) benchRIOE = FromIOEither(ioe)
} }
} }
@@ -93,7 +94,7 @@ func BenchmarkFromIOEither_Right(b *testing.B) {
func BenchmarkFromIOEither_Left(b *testing.B) { func BenchmarkFromIOEither_Left(b *testing.B) {
ioe := IOE.Left[int](benchErr) ioe := IOE.Left[int](benchErr)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = FromIOEither(ioe) benchRIOE = FromIOEither(ioe)
} }
} }
@@ -103,7 +104,7 @@ func BenchmarkExecute_Right(b *testing.B) {
rioe := Right(42) rioe := Right(42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -112,7 +113,7 @@ func BenchmarkExecute_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -123,7 +124,7 @@ func BenchmarkExecute_WithContext(b *testing.B) {
defer cancel() defer cancel()
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(ctx)() benchResult = rioe(ctx)()
} }
} }
@@ -131,40 +132,40 @@ func BenchmarkExecute_WithContext(b *testing.B) {
// Benchmark functor operations // Benchmark functor operations
func BenchmarkMonadMap_Right(b *testing.B) { func BenchmarkMonadMap_Right(b *testing.B) {
rioe := Right(42) rioe := Right(42)
mapper := func(a int) int { return a * 2 } mapper := N.Mul(2)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadMap(rioe, mapper) benchRIOE = MonadMap(rioe, mapper)
} }
} }
func BenchmarkMonadMap_Left(b *testing.B) { func BenchmarkMonadMap_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
mapper := func(a int) int { return a * 2 } mapper := N.Mul(2)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadMap(rioe, mapper) benchRIOE = MonadMap(rioe, mapper)
} }
} }
func BenchmarkMap_Right(b *testing.B) { func BenchmarkMap_Right(b *testing.B) {
rioe := Right(42) rioe := Right(42)
mapper := Map(func(a int) int { return a * 2 }) mapper := Map(N.Mul(2))
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = mapper(rioe) benchRIOE = mapper(rioe)
} }
} }
func BenchmarkMap_Left(b *testing.B) { func BenchmarkMap_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
mapper := Map(func(a int) int { return a * 2 }) mapper := Map(N.Mul(2))
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = mapper(rioe) benchRIOE = mapper(rioe)
} }
} }
@@ -174,7 +175,7 @@ func BenchmarkMapTo_Right(b *testing.B) {
mapper := MapTo[int](99) mapper := MapTo[int](99)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = mapper(rioe) benchRIOE = mapper(rioe)
} }
} }
@@ -185,7 +186,7 @@ func BenchmarkMonadChain_Right(b *testing.B) {
chainer := func(a int) ReaderIOResult[int] { return Right(a * 2) } chainer := func(a int) ReaderIOResult[int] { return Right(a * 2) }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadChain(rioe, chainer) benchRIOE = MonadChain(rioe, chainer)
} }
} }
@@ -195,7 +196,7 @@ func BenchmarkMonadChain_Left(b *testing.B) {
chainer := func(a int) ReaderIOResult[int] { return Right(a * 2) } chainer := func(a int) ReaderIOResult[int] { return Right(a * 2) }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadChain(rioe, chainer) benchRIOE = MonadChain(rioe, chainer)
} }
} }
@@ -205,7 +206,7 @@ func BenchmarkChain_Right(b *testing.B) {
chainer := Chain(func(a int) ReaderIOResult[int] { return Right(a * 2) }) chainer := Chain(func(a int) ReaderIOResult[int] { return Right(a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -215,7 +216,7 @@ func BenchmarkChain_Left(b *testing.B) {
chainer := Chain(func(a int) ReaderIOResult[int] { return Right(a * 2) }) chainer := Chain(func(a int) ReaderIOResult[int] { return Right(a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -225,7 +226,7 @@ func BenchmarkChainFirst_Right(b *testing.B) {
chainer := ChainFirst(func(a int) ReaderIOResult[string] { return Right("logged") }) chainer := ChainFirst(func(a int) ReaderIOResult[string] { return Right("logged") })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -235,7 +236,7 @@ func BenchmarkChainFirst_Left(b *testing.B) {
chainer := ChainFirst(func(a int) ReaderIOResult[string] { return Right("logged") }) chainer := ChainFirst(func(a int) ReaderIOResult[string] { return Right("logged") })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -244,7 +245,7 @@ func BenchmarkFlatten_Right(b *testing.B) {
nested := Right(Right(42)) nested := Right(Right(42))
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Flatten(nested) benchRIOE = Flatten(nested)
} }
} }
@@ -253,28 +254,28 @@ func BenchmarkFlatten_Left(b *testing.B) {
nested := Left[ReaderIOResult[int]](benchErr) nested := Left[ReaderIOResult[int]](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Flatten(nested) benchRIOE = Flatten(nested)
} }
} }
// Benchmark applicative operations // Benchmark applicative operations
func BenchmarkMonadApSeq_RightRight(b *testing.B) { func BenchmarkMonadApSeq_RightRight(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Right(42) fa := Right(42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApSeq(fab, fa) benchRIOE = MonadApSeq(fab, fa)
} }
} }
func BenchmarkMonadApSeq_RightLeft(b *testing.B) { func BenchmarkMonadApSeq_RightLeft(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Left[int](benchErr) fa := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApSeq(fab, fa) benchRIOE = MonadApSeq(fab, fa)
} }
} }
@@ -284,27 +285,27 @@ func BenchmarkMonadApSeq_LeftRight(b *testing.B) {
fa := Right(42) fa := Right(42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApSeq(fab, fa) benchRIOE = MonadApSeq(fab, fa)
} }
} }
func BenchmarkMonadApPar_RightRight(b *testing.B) { func BenchmarkMonadApPar_RightRight(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Right(42) fa := Right(42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApPar(fab, fa) benchRIOE = MonadApPar(fab, fa)
} }
} }
func BenchmarkMonadApPar_RightLeft(b *testing.B) { func BenchmarkMonadApPar_RightLeft(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Left[int](benchErr) fa := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApPar(fab, fa) benchRIOE = MonadApPar(fab, fa)
} }
} }
@@ -314,30 +315,30 @@ func BenchmarkMonadApPar_LeftRight(b *testing.B) {
fa := Right(42) fa := Right(42)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = MonadApPar(fab, fa) benchRIOE = MonadApPar(fab, fa)
} }
} }
// Benchmark execution of applicative operations // Benchmark execution of applicative operations
func BenchmarkExecuteApSeq_RightRight(b *testing.B) { func BenchmarkExecuteApSeq_RightRight(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Right(42) fa := Right(42)
rioe := MonadApSeq(fab, fa) rioe := MonadApSeq(fab, fa)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
func BenchmarkExecuteApPar_RightRight(b *testing.B) { func BenchmarkExecuteApPar_RightRight(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Right(42) fa := Right(42)
rioe := MonadApPar(fab, fa) rioe := MonadApPar(fab, fa)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -348,7 +349,7 @@ func BenchmarkAlt_RightRight(b *testing.B) {
alternative := Alt(func() ReaderIOResult[int] { return Right(99) }) alternative := Alt(func() ReaderIOResult[int] { return Right(99) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = alternative(rioe) benchRIOE = alternative(rioe)
} }
} }
@@ -358,7 +359,7 @@ func BenchmarkAlt_LeftRight(b *testing.B) {
alternative := Alt(func() ReaderIOResult[int] { return Right(99) }) alternative := Alt(func() ReaderIOResult[int] { return Right(99) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = alternative(rioe) benchRIOE = alternative(rioe)
} }
} }
@@ -368,7 +369,7 @@ func BenchmarkOrElse_Right(b *testing.B) {
recover := OrElse(func(e error) ReaderIOResult[int] { return Right(0) }) recover := OrElse(func(e error) ReaderIOResult[int] { return Right(0) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = recover(rioe) benchRIOE = recover(rioe)
} }
} }
@@ -378,7 +379,7 @@ func BenchmarkOrElse_Left(b *testing.B) {
recover := OrElse(func(e error) ReaderIOResult[int] { return Right(0) }) recover := OrElse(func(e error) ReaderIOResult[int] { return Right(0) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = recover(rioe) benchRIOE = recover(rioe)
} }
} }
@@ -389,7 +390,7 @@ func BenchmarkChainEitherK_Right(b *testing.B) {
chainer := ChainEitherK(func(a int) Either[int] { return E.Right[error](a * 2) }) chainer := ChainEitherK(func(a int) Either[int] { return E.Right[error](a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -399,7 +400,7 @@ func BenchmarkChainEitherK_Left(b *testing.B) {
chainer := ChainEitherK(func(a int) Either[int] { return E.Right[error](a * 2) }) chainer := ChainEitherK(func(a int) Either[int] { return E.Right[error](a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -409,7 +410,7 @@ func BenchmarkChainIOK_Right(b *testing.B) {
chainer := ChainIOK(func(a int) func() int { return func() int { return a * 2 } }) chainer := ChainIOK(func(a int) func() int { return func() int { return a * 2 } })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -419,7 +420,7 @@ func BenchmarkChainIOK_Left(b *testing.B) {
chainer := ChainIOK(func(a int) func() int { return func() int { return a * 2 } }) chainer := ChainIOK(func(a int) func() int { return func() int { return a * 2 } })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -429,7 +430,7 @@ func BenchmarkChainIOEitherK_Right(b *testing.B) {
chainer := ChainIOEitherK(func(a int) IOEither[int] { return IOE.Of[error](a * 2) }) chainer := ChainIOEitherK(func(a int) IOEither[int] { return IOE.Of[error](a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -439,7 +440,7 @@ func BenchmarkChainIOEitherK_Left(b *testing.B) {
chainer := ChainIOEitherK(func(a int) IOEither[int] { return IOE.Of[error](a * 2) }) chainer := ChainIOEitherK(func(a int) IOEither[int] { return IOE.Of[error](a * 2) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = chainer(rioe) benchRIOE = chainer(rioe)
} }
} }
@@ -447,7 +448,7 @@ func BenchmarkChainIOEitherK_Left(b *testing.B) {
// Benchmark context operations // Benchmark context operations
func BenchmarkAsk(b *testing.B) { func BenchmarkAsk(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Ask() _ = Ask()
} }
} }
@@ -455,7 +456,7 @@ func BenchmarkAsk(b *testing.B) {
func BenchmarkDefer(b *testing.B) { func BenchmarkDefer(b *testing.B) {
gen := func() ReaderIOResult[int] { return Right(42) } gen := func() ReaderIOResult[int] { return Right(42) }
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Defer(gen) benchRIOE = Defer(gen)
} }
} }
@@ -463,7 +464,7 @@ func BenchmarkDefer(b *testing.B) {
func BenchmarkMemoize(b *testing.B) { func BenchmarkMemoize(b *testing.B) {
rioe := Right(42) rioe := Right(42)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Memoize(rioe) benchRIOE = Memoize(rioe)
} }
} }
@@ -472,14 +473,14 @@ func BenchmarkMemoize(b *testing.B) {
func BenchmarkDelay_Construction(b *testing.B) { func BenchmarkDelay_Construction(b *testing.B) {
rioe := Right(42) rioe := Right(42)
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = Delay[int](time.Millisecond)(rioe) benchRIOE = Delay[int](time.Millisecond)(rioe)
} }
} }
func BenchmarkTimer_Construction(b *testing.B) { func BenchmarkTimer_Construction(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Timer(time.Millisecond) _ = Timer(time.Millisecond)
} }
} }
@@ -490,7 +491,7 @@ func BenchmarkTryCatch_Success(b *testing.B) {
return func() (int, error) { return 42, nil } return func() (int, error) { return 42, nil }
} }
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = TryCatch(f) benchRIOE = TryCatch(f)
} }
} }
@@ -500,7 +501,7 @@ func BenchmarkTryCatch_Error(b *testing.B) {
return func() (int, error) { return 0, benchErr } return func() (int, error) { return 0, benchErr }
} }
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = TryCatch(f) benchRIOE = TryCatch(f)
} }
} }
@@ -512,7 +513,7 @@ func BenchmarkExecuteTryCatch_Success(b *testing.B) {
rioe := TryCatch(f) rioe := TryCatch(f)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -524,7 +525,7 @@ func BenchmarkExecuteTryCatch_Error(b *testing.B) {
rioe := TryCatch(f) rioe := TryCatch(f)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -534,10 +535,10 @@ func BenchmarkPipeline_Map_Right(b *testing.B) {
rioe := Right(21) rioe := Right(21)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe1( benchRIOE = F.Pipe1(
rioe, rioe,
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
) )
} }
} }
@@ -546,10 +547,10 @@ func BenchmarkPipeline_Map_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe1( benchRIOE = F.Pipe1(
rioe, rioe,
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
) )
} }
} }
@@ -558,7 +559,7 @@ func BenchmarkPipeline_Chain_Right(b *testing.B) {
rioe := Right(21) rioe := Right(21)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe1( benchRIOE = F.Pipe1(
rioe, rioe,
Chain(func(x int) ReaderIOResult[int] { return Right(x * 2) }), Chain(func(x int) ReaderIOResult[int] { return Right(x * 2) }),
@@ -570,7 +571,7 @@ func BenchmarkPipeline_Chain_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe1( benchRIOE = F.Pipe1(
rioe, rioe,
Chain(func(x int) ReaderIOResult[int] { return Right(x * 2) }), Chain(func(x int) ReaderIOResult[int] { return Right(x * 2) }),
@@ -582,12 +583,12 @@ func BenchmarkPipeline_Complex_Right(b *testing.B) {
rioe := Right(10) rioe := Right(10)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe3( benchRIOE = F.Pipe3(
rioe, rioe,
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }), Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }),
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
) )
} }
} }
@@ -596,12 +597,12 @@ func BenchmarkPipeline_Complex_Left(b *testing.B) {
rioe := Left[int](benchErr) rioe := Left[int](benchErr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchRIOE = F.Pipe3( benchRIOE = F.Pipe3(
rioe, rioe,
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }), Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }),
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
) )
} }
} }
@@ -609,13 +610,13 @@ func BenchmarkPipeline_Complex_Left(b *testing.B) {
func BenchmarkExecutePipeline_Complex_Right(b *testing.B) { func BenchmarkExecutePipeline_Complex_Right(b *testing.B) {
rioe := F.Pipe3( rioe := F.Pipe3(
Right(10), Right(10),
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }), Chain(func(x int) ReaderIOResult[int] { return Right(x + 1) }),
Map(func(x int) int { return x * 2 }), Map(N.Mul(2)),
) )
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -624,7 +625,7 @@ func BenchmarkExecutePipeline_Complex_Right(b *testing.B) {
func BenchmarkDo(b *testing.B) { func BenchmarkDo(b *testing.B) {
type State struct{ value int } type State struct{ value int }
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = Do(State{}) _ = Do(State{})
} }
} }
@@ -642,7 +643,7 @@ func BenchmarkBind_Right(b *testing.B) {
) )
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = binder(initial) _ = binder(initial)
} }
} }
@@ -658,7 +659,7 @@ func BenchmarkLet_Right(b *testing.B) {
) )
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = letter(initial) _ = letter(initial)
} }
} }
@@ -674,7 +675,7 @@ func BenchmarkApS_Right(b *testing.B) {
) )
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = aps(initial) _ = aps(initial)
} }
} }
@@ -687,7 +688,7 @@ func BenchmarkTraverseArray_Empty(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(arr) _ = traverser(arr)
} }
} }
@@ -699,7 +700,7 @@ func BenchmarkTraverseArray_Small(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(arr) _ = traverser(arr)
} }
} }
@@ -714,7 +715,7 @@ func BenchmarkTraverseArray_Medium(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(arr) _ = traverser(arr)
} }
} }
@@ -726,7 +727,7 @@ func BenchmarkTraverseArraySeq_Small(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(arr) _ = traverser(arr)
} }
} }
@@ -738,7 +739,7 @@ func BenchmarkTraverseArrayPar_Small(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(arr) _ = traverser(arr)
} }
} }
@@ -751,7 +752,7 @@ func BenchmarkSequenceArray_Small(b *testing.B) {
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = SequenceArray(arr) _ = SequenceArray(arr)
} }
} }
@@ -763,7 +764,7 @@ func BenchmarkExecuteTraverseArray_Small(b *testing.B) {
})(arr) })(arr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = rioe(benchCtx)() _ = rioe(benchCtx)()
} }
} }
@@ -775,7 +776,7 @@ func BenchmarkExecuteTraverseArraySeq_Small(b *testing.B) {
})(arr) })(arr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = rioe(benchCtx)() _ = rioe(benchCtx)()
} }
} }
@@ -787,7 +788,7 @@ func BenchmarkExecuteTraverseArrayPar_Small(b *testing.B) {
})(arr) })(arr)
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = rioe(benchCtx)() _ = rioe(benchCtx)()
} }
} }
@@ -800,7 +801,7 @@ func BenchmarkTraverseRecord_Small(b *testing.B) {
}) })
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = traverser(rec) _ = traverser(rec)
} }
} }
@@ -813,7 +814,7 @@ func BenchmarkSequenceRecord_Small(b *testing.B) {
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = SequenceRecord(rec) _ = SequenceRecord(rec)
} }
} }
@@ -826,7 +827,7 @@ func BenchmarkWithResource_Success(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
_ = WithResource[int](acquire, release)(body) _ = WithResource[int](acquire, release)(body)
} }
} }
@@ -839,7 +840,7 @@ func BenchmarkExecuteWithResource_Success(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -852,7 +853,7 @@ func BenchmarkExecuteWithResource_ErrorInBody(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(benchCtx)() benchResult = rioe(benchCtx)()
} }
} }
@@ -865,13 +866,13 @@ func BenchmarkExecute_CanceledContext(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(ctx)() benchResult = rioe(ctx)()
} }
} }
func BenchmarkExecuteApPar_CanceledContext(b *testing.B) { func BenchmarkExecuteApPar_CanceledContext(b *testing.B) {
fab := Right(func(a int) int { return a * 2 }) fab := Right(N.Mul(2))
fa := Right(42) fa := Right(42)
rioe := MonadApPar(fab, fa) rioe := MonadApPar(fab, fa)
ctx, cancel := context.WithCancel(benchCtx) ctx, cancel := context.WithCancel(benchCtx)
@@ -879,7 +880,7 @@ func BenchmarkExecuteApPar_CanceledContext(b *testing.B) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
for i := 0; i < b.N; i++ { for b.Loop() {
benchResult = rioe(ctx)() benchResult = rioe(ctx)()
} }
} }

View File

@@ -26,6 +26,7 @@ import (
IOG "github.com/IBM/fp-go/v2/io" IOG "github.com/IBM/fp-go/v2/io"
IOE "github.com/IBM/fp-go/v2/ioeither" IOE "github.com/IBM/fp-go/v2/ioeither"
M "github.com/IBM/fp-go/v2/monoid" M "github.com/IBM/fp-go/v2/monoid"
N "github.com/IBM/fp-go/v2/number"
O "github.com/IBM/fp-go/v2/option" O "github.com/IBM/fp-go/v2/option"
R "github.com/IBM/fp-go/v2/reader" R "github.com/IBM/fp-go/v2/reader"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -77,27 +78,27 @@ func TestOf(t *testing.T) {
func TestMonadMap(t *testing.T) { func TestMonadMap(t *testing.T) {
t.Run("Map over Right", func(t *testing.T) { t.Run("Map over Right", func(t *testing.T) {
result := MonadMap(Of(5), func(x int) int { return x * 2 }) result := MonadMap(Of(5), N.Mul(2))
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
}) })
t.Run("Map over Left", func(t *testing.T) { t.Run("Map over Left", func(t *testing.T) {
err := errors.New("test error") err := errors.New("test error")
result := MonadMap(Left[int](err), func(x int) int { return x * 2 }) result := MonadMap(Left[int](err), N.Mul(2))
assert.Equal(t, E.Left[int](err), result(context.Background())()) assert.Equal(t, E.Left[int](err), result(context.Background())())
}) })
} }
func TestMap(t *testing.T) { func TestMap(t *testing.T) {
t.Run("Map with success", func(t *testing.T) { t.Run("Map with success", func(t *testing.T) {
mapper := Map(func(x int) int { return x * 2 }) mapper := Map(N.Mul(2))
result := mapper(Of(5)) result := mapper(Of(5))
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
}) })
t.Run("Map with error", func(t *testing.T) { t.Run("Map with error", func(t *testing.T) {
err := errors.New("test error") err := errors.New("test error")
mapper := Map(func(x int) int { return x * 2 }) mapper := Map(N.Mul(2))
result := mapper(Left[int](err)) result := mapper(Left[int](err))
assert.Equal(t, E.Left[int](err), result(context.Background())()) assert.Equal(t, E.Left[int](err), result(context.Background())())
}) })
@@ -182,7 +183,7 @@ func TestChainFirst(t *testing.T) {
func TestMonadApSeq(t *testing.T) { func TestMonadApSeq(t *testing.T) {
t.Run("ApSeq with success", func(t *testing.T) { t.Run("ApSeq with success", func(t *testing.T) {
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
fa := Of(5) fa := Of(5)
result := MonadApSeq(fab, fa) result := MonadApSeq(fab, fa)
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
@@ -198,7 +199,7 @@ func TestMonadApSeq(t *testing.T) {
t.Run("ApSeq with error in value", func(t *testing.T) { t.Run("ApSeq with error in value", func(t *testing.T) {
err := errors.New("test error") err := errors.New("test error")
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
fa := Left[int](err) fa := Left[int](err)
result := MonadApSeq(fab, fa) result := MonadApSeq(fab, fa)
assert.Equal(t, E.Left[int](err), result(context.Background())()) assert.Equal(t, E.Left[int](err), result(context.Background())())
@@ -207,7 +208,7 @@ func TestMonadApSeq(t *testing.T) {
func TestApSeq(t *testing.T) { func TestApSeq(t *testing.T) {
fa := Of(5) fa := Of(5)
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
result := MonadApSeq(fab, fa) result := MonadApSeq(fab, fa)
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
} }
@@ -215,7 +216,7 @@ func TestApSeq(t *testing.T) {
func TestApPar(t *testing.T) { func TestApPar(t *testing.T) {
t.Run("ApPar with success", func(t *testing.T) { t.Run("ApPar with success", func(t *testing.T) {
fa := Of(5) fa := Of(5)
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
result := MonadApPar(fab, fa) result := MonadApPar(fab, fa)
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
}) })
@@ -224,7 +225,7 @@ func TestApPar(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
cancel() cancel()
fa := Of(5) fa := Of(5)
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
result := MonadApPar(fab, fa) result := MonadApPar(fab, fa)
res := result(ctx)() res := result(ctx)()
assert.True(t, E.IsLeft(res)) assert.True(t, E.IsLeft(res))
@@ -566,15 +567,13 @@ func TestMemoize(t *testing.T) {
res1 := computation(context.Background())() res1 := computation(context.Background())()
assert.True(t, E.IsRight(res1)) assert.True(t, E.IsRight(res1))
val1 := E.ToOption(res1) val1 := E.ToOption(res1)
v1, _ := O.Unwrap(val1) assert.Equal(t, O.Of(1), val1)
assert.Equal(t, 1, v1)
// Second execution should return cached value // Second execution should return cached value
res2 := computation(context.Background())() res2 := computation(context.Background())()
assert.True(t, E.IsRight(res2)) assert.True(t, E.IsRight(res2))
val2 := E.ToOption(res2) val2 := E.ToOption(res2)
v2, _ := O.Unwrap(val2) assert.Equal(t, O.Of(1), val2)
assert.Equal(t, 1, v2)
// Counter should only be incremented once // Counter should only be incremented once
assert.Equal(t, 1, counter) assert.Equal(t, 1, counter)
@@ -587,14 +586,14 @@ func TestFlatten(t *testing.T) {
} }
func TestMonadFlap(t *testing.T) { func TestMonadFlap(t *testing.T) {
fab := Of(func(x int) int { return x * 2 }) fab := Of(N.Mul(2))
result := MonadFlap(fab, 5) result := MonadFlap(fab, 5)
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
} }
func TestFlap(t *testing.T) { func TestFlap(t *testing.T) {
flapper := Flap[int](5) flapper := Flap[int](5)
result := flapper(Of(func(x int) int { return x * 2 })) result := flapper(Of(N.Mul(2)))
assert.Equal(t, E.Right[error](10), result(context.Background())()) assert.Equal(t, E.Right[error](10), result(context.Background())())
} }
@@ -738,9 +737,7 @@ func TestTraverseArray(t *testing.T) {
res := result(context.Background())() res := result(context.Background())()
assert.True(t, E.IsRight(res)) assert.True(t, E.IsRight(res))
arrOpt := E.ToOption(res) arrOpt := E.ToOption(res)
assert.True(t, O.IsSome(arrOpt)) assert.Equal(t, O.Of([]int{2, 4, 6}), arrOpt)
resultArr, _ := O.Unwrap(arrOpt)
assert.Equal(t, []int{2, 4, 6}, resultArr)
}) })
t.Run("TraverseArray with error", func(t *testing.T) { t.Run("TraverseArray with error", func(t *testing.T) {
@@ -764,9 +761,7 @@ func TestSequenceArray(t *testing.T) {
res := result(context.Background())() res := result(context.Background())()
assert.True(t, E.IsRight(res)) assert.True(t, E.IsRight(res))
arrOpt := E.ToOption(res) arrOpt := E.ToOption(res)
assert.True(t, O.IsSome(arrOpt)) assert.Equal(t, O.Of([]int{1, 2, 3}), arrOpt)
resultArr, _ := O.Unwrap(arrOpt)
assert.Equal(t, []int{1, 2, 3}, resultArr)
} }
func TestTraverseRecord(t *testing.T) { func TestTraverseRecord(t *testing.T) {

View File

@@ -284,3 +284,160 @@ func TestWithResourceErrorInRelease(t *testing.T) {
assert.Equal(t, 0, countRelease) assert.Equal(t, 0, countRelease)
assert.Equal(t, E.Left[int](err), res) assert.Equal(t, E.Left[int](err), res)
} }
func TestMonadChainFirstLeft(t *testing.T) {
ctx := context.Background()
// Test with Left value - function returns Left, always preserves original error
t.Run("Left value with function returning Left preserves original error", func(t *testing.T) {
sideEffectCalled := false
originalErr := fmt.Errorf("original error")
result := MonadChainFirstLeft(
Left[int](originalErr),
func(e error) ReaderIOResult[int] {
sideEffectCalled = true
return Left[int](fmt.Errorf("new error")) // This error is ignored
},
)
actualResult := result(ctx)()
assert.True(t, sideEffectCalled)
assert.Equal(t, E.Left[int](originalErr), actualResult)
})
// Test with Left value - function returns Right, still returns original Left
t.Run("Left value with function returning Right still returns original Left", func(t *testing.T) {
var capturedError error
originalErr := fmt.Errorf("validation failed")
result := MonadChainFirstLeft(
Left[int](originalErr),
func(e error) ReaderIOResult[int] {
capturedError = e
return Right(999) // This Right value is ignored
},
)
actualResult := result(ctx)()
assert.Equal(t, originalErr, capturedError)
assert.Equal(t, E.Left[int](originalErr), actualResult)
})
// Test with Right value - should pass through without calling function
t.Run("Right value passes through", func(t *testing.T) {
sideEffectCalled := false
result := MonadChainFirstLeft(
Right(42),
func(e error) ReaderIOResult[int] {
sideEffectCalled = true
return Left[int](fmt.Errorf("should not be called"))
},
)
assert.False(t, sideEffectCalled)
assert.Equal(t, E.Right[error](42), result(ctx)())
})
// Test that side effects are executed but original error is always preserved
t.Run("Side effects executed but original error preserved", func(t *testing.T) {
effectCount := 0
originalErr := fmt.Errorf("original error")
result := MonadChainFirstLeft(
Left[int](originalErr),
func(e error) ReaderIOResult[int] {
effectCount++
// Try to return Right, but original Left should still be returned
return Right(999)
},
)
actualResult := result(ctx)()
assert.Equal(t, 1, effectCount)
assert.Equal(t, E.Left[int](originalErr), actualResult)
})
}
func TestChainFirstLeft(t *testing.T) {
ctx := context.Background()
// Test with Left value - function returns Left, always preserves original error
t.Run("Left value with function returning Left preserves error", func(t *testing.T) {
var captured error
originalErr := fmt.Errorf("test error")
chainFn := ChainFirstLeft[int](func(e error) ReaderIOResult[int] {
captured = e
return Left[int](fmt.Errorf("ignored error"))
})
result := F.Pipe1(
Left[int](originalErr),
chainFn,
)
actualResult := result(ctx)()
assert.Equal(t, originalErr, captured)
assert.Equal(t, E.Left[int](originalErr), actualResult)
})
// Test with Left value - function returns Right, still returns original Left
t.Run("Left value with function returning Right still returns original Left", func(t *testing.T) {
var captured error
originalErr := fmt.Errorf("test error")
chainFn := ChainFirstLeft[int](func(e error) ReaderIOResult[int] {
captured = e
return Right(42) // This Right is ignored
})
result := F.Pipe1(
Left[int](originalErr),
chainFn,
)
actualResult := result(ctx)()
assert.Equal(t, originalErr, captured)
assert.Equal(t, E.Left[int](originalErr), actualResult)
})
// Test with Right value - should pass through without calling function
t.Run("Right value passes through", func(t *testing.T) {
called := false
chainFn := ChainFirstLeft[int](func(e error) ReaderIOResult[int] {
called = true
return Right(0)
})
result := F.Pipe1(
Right(100),
chainFn,
)
assert.False(t, called)
assert.Equal(t, E.Right[error](100), result(ctx)())
})
// Test that original error is always preserved regardless of what f returns
t.Run("Original error always preserved", func(t *testing.T) {
originalErr := fmt.Errorf("original")
chainFn := ChainFirstLeft[int](func(e error) ReaderIOResult[int] {
// Try to return Right, but original Left should still be returned
return Right(999)
})
result := F.Pipe1(
Left[int](originalErr),
chainFn,
)
assert.Equal(t, E.Left[int](originalErr), result(ctx)())
})
// Test logging with Left preservation
t.Run("Logging with Left preservation", func(t *testing.T) {
errorLog := []string{}
originalErr := fmt.Errorf("step1")
logError := ChainFirstLeft[string](func(e error) ReaderIOResult[string] {
errorLog = append(errorLog, "Logged: "+e.Error())
return Left[string](fmt.Errorf("log entry")) // This is ignored
})
result := F.Pipe2(
Left[string](originalErr),
logError,
ChainLeft(func(e error) ReaderIOResult[string] {
return Left[string](fmt.Errorf("step2"))
}),
)
actualResult := result(ctx)()
assert.Equal(t, []string{"Logged: step1"}, errorLog)
assert.Equal(t, E.Left[string](fmt.Errorf("step2")), actualResult)
})
}

View File

@@ -0,0 +1,183 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
F "github.com/IBM/fp-go/v2/function"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
)
// TailRec implements stack-safe tail recursion for the context-aware ReaderIOResult monad.
//
// This function enables recursive computations that combine four powerful concepts:
// - Context awareness: Automatic cancellation checking via [context.Context]
// - Environment dependency (Reader aspect): Access to configuration, context, or dependencies
// - Side effects (IO aspect): Logging, file I/O, network calls, etc.
// - Error handling (Either aspect): Computations that can fail with an error
//
// The function uses an iterative loop to execute the recursion, making it safe for deep
// or unbounded recursion without risking stack overflow. Additionally, it integrates
// context cancellation checking through [WithContext], ensuring that recursive computations
// can be cancelled gracefully.
//
// # How It Works
//
// TailRec takes a Kleisli arrow whose ReaderIOResult yields a Trampoline[A, B]:
// - Bounce(A): Continue recursion with the new state A
// - Land(B): Terminate recursion successfully and return the final result B
//
// The function wraps each iteration with [WithContext] to ensure context cancellation
// is checked before each recursive step. If the context is cancelled, the recursion
// terminates early with a context cancellation error.
//
// # Type Parameters
//
// - A: The state type that changes during recursion
// - B: The final result type when recursion terminates successfully
//
// # Parameters
//
// - f: A Kleisli arrow (A => ReaderIOResult[Trampoline[A, B]]) that:
// - Takes the current state A
// - Returns a ReaderIOResult that depends on [context.Context]
// - Can fail with error (Left in the outer Either)
// - Produces Trampoline[A, B] to control recursion flow (Right in the outer Either)
//
// # Returns
//
// A Kleisli arrow (A => ReaderIOResult[B]) that:
// - Takes an initial state A
// - Returns a ReaderIOResult that requires [context.Context]
// - Can fail with error or context cancellation
// - Produces the final result B after recursion completes
//
// # Context Cancellation
//
// Unlike the base [readerioresult.TailRec], this version automatically integrates
// context cancellation checking:
// - Each recursive iteration checks if the context is cancelled
// - If cancelled, recursion terminates immediately with a cancellation error
// - This prevents runaway recursive computations in cancelled contexts
// - Enables responsive cancellation for long-running recursive operations
//
// # Use Cases
//
// 1. Cancellable recursive algorithms:
// - Tree traversals that can be cancelled mid-operation
// - Graph algorithms with timeout requirements
// - Recursive parsers that respect cancellation
//
// 2. Long-running recursive computations:
// - File system traversals with cancellation support
// - Network operations with timeout handling
// - Database operations with connection timeout awareness
//
// 3. Interactive recursive operations:
// - User-initiated operations that can be cancelled
// - Background tasks with cancellation support
// - Streaming operations with graceful shutdown
//
// # Example: Cancellable Countdown
//
// countdownStep := func(n int) readerioresult.ReaderIOResult[tailrec.Trampoline[int, string]] {
// return func(ctx context.Context) ioeither.IOEither[error, tailrec.Trampoline[int, string]] {
// return func() either.Either[error, tailrec.Trampoline[int, string]] {
// if n <= 0 {
// return either.Right[error](tailrec.Land[int]("Done!"))
// }
// // Simulate some work
// time.Sleep(100 * time.Millisecond)
// return either.Right[error](tailrec.Bounce[string](n - 1))
// }
// }
// }
//
// countdown := readerioresult.TailRec(countdownStep)
//
// // With cancellation
// ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
// defer cancel()
// result := countdown(10)(ctx)() // Will be cancelled after ~500ms
//
// # Example: Cancellable File Processing
//
// type ProcessState struct {
// files []string
// processed []string
// }
//
// processStep := func(state ProcessState) readerioresult.ReaderIOResult[tailrec.Trampoline[ProcessState, []string]] {
// return func(ctx context.Context) ioeither.IOEither[error, tailrec.Trampoline[ProcessState, []string]] {
// return func() either.Either[error, tailrec.Trampoline[ProcessState, []string]] {
// if len(state.files) == 0 {
// return either.Right[error](tailrec.Land[ProcessState](state.processed))
// }
//
// file := state.files[0]
// // Process file (this could be cancelled via context)
// if err := processFileWithContext(ctx, file); err != nil {
// return either.Left[tailrec.Trampoline[ProcessState, []string]](err)
// }
//
// return either.Right[error](tailrec.Bounce[[]string](ProcessState{
// files: state.files[1:],
// processed: append(state.processed, file),
// }))
// }
// }
// }
//
// processFiles := readerioresult.TailRec(processStep)
// ctx, cancel := context.WithCancel(context.Background())
//
// // Can be cancelled at any point during processing
// go func() {
// time.Sleep(2 * time.Second)
// cancel() // Cancel after 2 seconds
// }()
//
// result := processFiles(ProcessState{files: manyFiles})(ctx)()
//
// # Stack Safety
//
// The iterative implementation ensures that even deeply recursive computations
// (thousands or millions of iterations) will not cause stack overflow, while
// still respecting context cancellation:
//
// // Safe for very large inputs with cancellation support
// largeCountdown := readerioresult.TailRec(countdownStep)
// ctx := context.Background()
// result := largeCountdown(1000000)(ctx)() // Safe, no stack overflow
//
// # Performance Considerations
//
// - Each iteration includes context cancellation checking overhead
// - Context checking happens before each recursive step
// - For performance-critical code, consider the cancellation checking cost
// - The [WithContext] wrapper adds minimal overhead for cancellation safety
//
// # See Also
//
// - [readerioresult.TailRec]: Base tail recursion without automatic context checking
// - [WithContext]: Context cancellation wrapper used internally
// - [Chain]: For sequencing ReaderIOResult computations
// - [Ask]: For accessing the context
// - [Left]/[Right]: For creating error/success values
//
//go:inline
func TailRec[A, B any](f Kleisli[A, Trampoline[A, B]]) Kleisli[A, B] {
return RIOR.TailRec(F.Flow2(f, WithContext))
}

View File

@@ -0,0 +1,434 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"context"
"errors"
"fmt"
"sync/atomic"
"testing"
"time"
A "github.com/IBM/fp-go/v2/array"
E "github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/tailrec"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestTailRec_BasicRecursion(t *testing.T) {
// Test basic countdown recursion
countdownStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
countdown := TailRec(countdownStep)
result := countdown(5)(context.Background())()
assert.Equal(t, E.Of[error]("Done!"), result)
}
func TestTailRec_FactorialRecursion(t *testing.T) {
// Test factorial computation using tail recursion
type FactorialState struct {
n int
acc int
}
factorialStep := func(state FactorialState) ReaderIOResult[Trampoline[FactorialState, int]] {
return func(ctx context.Context) IOEither[Trampoline[FactorialState, int]] {
return func() Either[Trampoline[FactorialState, int]] {
if state.n <= 1 {
return E.Right[error](tailrec.Land[FactorialState](state.acc))
}
return E.Right[error](tailrec.Bounce[int](FactorialState{
n: state.n - 1,
acc: state.acc * state.n,
}))
}
}
}
factorial := TailRec(factorialStep)
result := factorial(FactorialState{n: 5, acc: 1})(context.Background())()
assert.Equal(t, E.Of[error](120), result) // 5! = 120
}
func TestTailRec_ErrorHandling(t *testing.T) {
// Test that errors are properly propagated
testErr := errors.New("computation error")
errorStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
if n == 3 {
return E.Left[Trampoline[int, string]](testErr)
}
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
errorRecursion := TailRec(errorStep)
result := errorRecursion(5)(context.Background())()
assert.True(t, E.IsLeft(result))
err := E.ToError(result)
assert.Equal(t, testErr, err)
}
func TestTailRec_ContextCancellation(t *testing.T) {
// Test that recursion gets cancelled early when context is canceled
var iterationCount int32
slowStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
atomic.AddInt32(&iterationCount, 1)
// Simulate some work
time.Sleep(50 * time.Millisecond)
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
slowRecursion := TailRec(slowStep)
// Create a context that will be cancelled after 100ms
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
start := time.Now()
result := slowRecursion(10)(ctx)()
elapsed := time.Since(start)
// Should be cancelled and return an error
assert.True(t, E.IsLeft(result))
// Should complete quickly due to cancellation (much less than 10 * 50ms = 500ms)
assert.Less(t, elapsed, 200*time.Millisecond)
// Should have executed only a few iterations before cancellation
iterations := atomic.LoadInt32(&iterationCount)
assert.Less(t, iterations, int32(5), "Should have been cancelled before completing all iterations")
}
func TestTailRec_ImmediateCancellation(t *testing.T) {
// Test with an already cancelled context
countdownStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
countdown := TailRec(countdownStep)
// Create an already cancelled context
ctx, cancel := context.WithCancel(context.Background())
cancel()
result := countdown(5)(ctx)()
// Should immediately return a cancellation error
assert.True(t, E.IsLeft(result))
err := E.ToError(result)
assert.Equal(t, context.Canceled, err)
}
func TestTailRec_StackSafety(t *testing.T) {
// Test that deep recursion doesn't cause stack overflow
const largeN = 10000
countdownStep := func(n int) ReaderIOResult[Trampoline[int, int]] {
return func(ctx context.Context) IOEither[Trampoline[int, int]] {
return func() Either[Trampoline[int, int]] {
if n <= 0 {
return E.Right[error](tailrec.Land[int](0))
}
return E.Right[error](tailrec.Bounce[int](n - 1))
}
}
}
countdown := TailRec(countdownStep)
result := countdown(largeN)(context.Background())()
assert.Equal(t, E.Of[error](0), result)
}
func TestTailRec_StackSafetyWithCancellation(t *testing.T) {
// Test stack safety with cancellation after many iterations
const largeN = 100000
var iterationCount int32
countdownStep := func(n int) ReaderIOResult[Trampoline[int, int]] {
return func(ctx context.Context) IOEither[Trampoline[int, int]] {
return func() Either[Trampoline[int, int]] {
atomic.AddInt32(&iterationCount, 1)
// Add a small delay every 1000 iterations to make cancellation more likely
if n%1000 == 0 {
time.Sleep(1 * time.Millisecond)
}
if n <= 0 {
return E.Right[error](tailrec.Land[int](0))
}
return E.Right[error](tailrec.Bounce[int](n - 1))
}
}
}
countdown := TailRec(countdownStep)
// Cancel after 50ms to allow some iterations but not all
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
result := countdown(largeN)(ctx)()
// Should be cancelled (or completed if very fast)
// The key is that it doesn't cause a stack overflow
iterations := atomic.LoadInt32(&iterationCount)
assert.Greater(t, iterations, int32(0))
// If it was cancelled, verify it didn't complete all iterations
if E.IsLeft(result) {
assert.Less(t, iterations, int32(largeN))
}
}
func TestTailRec_ComplexState(t *testing.T) {
// Test with more complex state management
type ProcessState struct {
items []string
processed []string
errors []error
}
processStep := func(state ProcessState) ReaderIOResult[Trampoline[ProcessState, []string]] {
return func(ctx context.Context) IOEither[Trampoline[ProcessState, []string]] {
return func() Either[Trampoline[ProcessState, []string]] {
if A.IsEmpty(state.items) {
return E.Right[error](tailrec.Land[ProcessState](state.processed))
}
item := state.items[0]
// Simulate processing that might fail for certain items
if item == "error-item" {
return E.Left[Trampoline[ProcessState, []string]](
fmt.Errorf("failed to process item: %s", item))
}
return E.Right[error](tailrec.Bounce[[]string](ProcessState{
items: state.items[1:],
processed: append(state.processed, item),
errors: state.errors,
}))
}
}
}
processItems := TailRec(processStep)
t.Run("successful processing", func(t *testing.T) {
initialState := ProcessState{
items: []string{"item1", "item2", "item3"},
processed: []string{},
errors: []error{},
}
result := processItems(initialState)(context.Background())()
assert.Equal(t, E.Of[error]([]string{"item1", "item2", "item3"}), result)
})
t.Run("processing with error", func(t *testing.T) {
initialState := ProcessState{
items: []string{"item1", "error-item", "item3"},
processed: []string{},
errors: []error{},
}
result := processItems(initialState)(context.Background())()
assert.True(t, E.IsLeft(result))
err := E.ToError(result)
assert.Contains(t, err.Error(), "failed to process item: error-item")
})
}
func TestTailRec_CancellationDuringProcessing(t *testing.T) {
// Test cancellation during a realistic processing scenario
type FileProcessState struct {
files []string
processed int
}
var processedCount int32
processFileStep := func(state FileProcessState) ReaderIOResult[Trampoline[FileProcessState, int]] {
return func(ctx context.Context) IOEither[Trampoline[FileProcessState, int]] {
return func() Either[Trampoline[FileProcessState, int]] {
if A.IsEmpty(state.files) {
return E.Right[error](tailrec.Land[FileProcessState](state.processed))
}
// Simulate file processing time
time.Sleep(20 * time.Millisecond)
atomic.AddInt32(&processedCount, 1)
return E.Right[error](tailrec.Bounce[int](FileProcessState{
files: state.files[1:],
processed: state.processed + 1,
}))
}
}
}
processFiles := TailRec(processFileStep)
// Create many files to process
files := make([]string, 20)
for i := range files {
files[i] = fmt.Sprintf("file%d.txt", i)
}
initialState := FileProcessState{
files: files,
processed: 0,
}
// Cancel after 100ms (should allow ~5 files to be processed)
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()
start := time.Now()
result := processFiles(initialState)(ctx)()
elapsed := time.Since(start)
// Should be cancelled
assert.True(t, E.IsLeft(result))
// Should complete quickly due to cancellation
assert.Less(t, elapsed, 150*time.Millisecond)
// Should have processed some but not all files
processed := atomic.LoadInt32(&processedCount)
assert.Greater(t, processed, int32(0))
assert.Less(t, processed, int32(20))
}
func TestTailRec_ZeroIterations(t *testing.T) {
// Test case where recursion terminates immediately
immediateStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
return E.Right[error](tailrec.Land[int]("immediate"))
}
}
}
immediate := TailRec(immediateStep)
result := immediate(100)(context.Background())()
assert.Equal(t, E.Of[error]("immediate"), result)
}
func TestTailRec_ContextWithDeadline(t *testing.T) {
// Test with context deadline
var iterationCount int32
slowStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
atomic.AddInt32(&iterationCount, 1)
time.Sleep(30 * time.Millisecond)
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
slowRecursion := TailRec(slowStep)
// Set deadline 80ms from now
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(80*time.Millisecond))
defer cancel()
result := slowRecursion(10)(ctx)()
// Should be cancelled due to deadline
assert.True(t, E.IsLeft(result))
// Should have executed only a few iterations
iterations := atomic.LoadInt32(&iterationCount)
assert.Greater(t, iterations, int32(0))
assert.Less(t, iterations, int32(5))
}
func TestTailRec_ContextWithValue(t *testing.T) {
// Test that context values are preserved through recursion
type contextKey string
const testKey contextKey = "test"
valueStep := func(n int) ReaderIOResult[Trampoline[int, string]] {
return func(ctx context.Context) IOEither[Trampoline[int, string]] {
return func() Either[Trampoline[int, string]] {
value := ctx.Value(testKey)
require.NotNil(t, value)
assert.Equal(t, "test-value", value.(string))
if n <= 0 {
return E.Right[error](tailrec.Land[int]("Done!"))
}
return E.Right[error](tailrec.Bounce[string](n - 1))
}
}
}
valueRecursion := TailRec(valueStep)
ctx := context.WithValue(context.Background(), testKey, "test-value")
result := valueRecursion(3)(ctx)()
assert.Equal(t, E.Of[error]("Done!"), result)
}


@@ -16,7 +16,11 @@
package readerioresult
import (
+"context"
+"io"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
+"github.com/IBM/fp-go/v2/result"
)
// WithResource constructs a function that creates a resource, then operates on it and then releases the resource.
@@ -55,3 +59,111 @@ import (
func WithResource[A, R, ANY any](onCreate ReaderIOResult[R], onRelease Kleisli[R, ANY]) Kleisli[Kleisli[R, A], A] {
return RIOR.WithResource[A](onCreate, onRelease)
}
// onClose is a helper function that creates a ReaderIOResult for closing an io.Closer resource.
// It safely calls the Close() method and handles any errors that may occur during closing.
//
// Type Parameters:
// - A: Must implement io.Closer interface
//
// Parameters:
// - a: The resource to close
//
// Returns:
// - ReaderIOResult[any]: A computation that closes the resource and returns nil on success
//
// The function ignores the context parameter since closing operations typically don't need context.
// Any error from Close() is captured and returned as a Result error.
func onClose[A io.Closer](a A) ReaderIOResult[any] {
return func(_ context.Context) IOResult[any] {
return func() Result[any] {
return result.TryCatchError[any](nil, a.Close())
}
}
}
// WithCloser creates a resource management function specifically for io.Closer resources.
// This is a specialized version of WithResource that automatically handles closing of resources
// that implement the io.Closer interface.
//
// The function ensures that:
// - The resource is created using the onCreate function
// - The resource is automatically closed when the operation completes (success or failure)
// - Any errors during closing are properly handled
// - The resource is closed even if the main operation fails or the context is canceled
//
// Type Parameters:
// - B: The type of value returned by the resource-using function
// - A: The type of resource that implements io.Closer
//
// Parameters:
// - onCreate: ReaderIOResult that creates the io.Closer resource
//
// Returns:
// - A function that takes a resource-using function and returns a ReaderIOResult[B]
//
// Example with file operations:
//
// openFile := func(filename string) ReaderIOResult[*os.File] {
// return TryCatch(func(ctx context.Context) func() (*os.File, error) {
// return func() (*os.File, error) {
// return os.Open(filename)
// }
// })
// }
//
// fileReader := WithCloser(openFile("data.txt"))
// result := fileReader(func(f *os.File) ReaderIOResult[string] {
// return TryCatch(func(ctx context.Context) func() (string, error) {
// return func() (string, error) {
// data, err := io.ReadAll(f)
// return string(data), err
// }
// })
// })
//
// Example with HTTP response:
//
// httpGet := func(url string) ReaderIOResult[*http.Response] {
// return TryCatch(func(ctx context.Context) func() (*http.Response, error) {
// return func() (*http.Response, error) {
// return http.Get(url)
// }
// })
// }
//
// responseReader := WithCloser(httpGet("https://api.example.com/data"))
// result := responseReader(func(resp *http.Response) ReaderIOResult[[]byte] {
// return TryCatch(func(ctx context.Context) func() ([]byte, error) {
// return func() ([]byte, error) {
// return io.ReadAll(resp.Body)
// }
// })
// })
//
// Example with database connection:
//
// openDB := func(dsn string) ReaderIOResult[*sql.DB] {
// return TryCatch(func(ctx context.Context) func() (*sql.DB, error) {
// return func() (*sql.DB, error) {
// return sql.Open("postgres", dsn)
// }
// })
// }
//
// dbQuery := WithCloser(openDB("postgres://..."))
// result := dbQuery(func(db *sql.DB) ReaderIOResult[[]User] {
// return TryCatch(func(ctx context.Context) func() ([]User, error) {
// return func() ([]User, error) {
// rows, err := db.QueryContext(ctx, "SELECT * FROM users")
// if err != nil {
// return nil, err
// }
// defer rows.Close()
// return scanUsers(rows)
// }
// })
// })
func WithCloser[B any, A io.Closer](onCreate ReaderIOResult[A]) Kleisli[Kleisli[A, B], B] {
return WithResource[B](onCreate, onClose[A])
}


@@ -0,0 +1,179 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"context"
"time"
RIO "github.com/IBM/fp-go/v2/context/readerio"
R "github.com/IBM/fp-go/v2/retry"
RG "github.com/IBM/fp-go/v2/retry/generic"
)
// Retrying retries a ReaderIOResult computation according to a retry policy with context awareness.
//
// This function implements a retry mechanism for operations that depend on a [context.Context],
// perform side effects (IO), and can fail (Result). It respects context cancellation, meaning
// that if the context is cancelled during retry delays, the operation will stop immediately
// and return the cancellation error.
//
// The retry loop will continue until one of the following occurs:
// - The action succeeds and the check function returns false (no retry needed)
// - The retry policy returns None (retry limit reached)
// - The check function returns false (indicating success or a non-retryable failure)
// - The context is cancelled (returns context.Canceled or context.DeadlineExceeded)
//
// Parameters:
//
// - policy: A RetryPolicy that determines when and how long to wait between retries.
// The policy receives a RetryStatus on each iteration and returns an optional delay.
// If it returns None, retrying stops. Common policies include LimitRetries,
// ExponentialBackoff, and CapDelay from the retry package.
//
// - action: A Kleisli arrow that takes a RetryStatus and returns a ReaderIOResult[A].
// This function is called on each retry attempt and receives information about the
// current retry state (iteration number, cumulative delay, etc.). The action depends
// on a context.Context and produces a Result[A]. The context passed to the action
// will be the same context used for retry delays, so cancellation is properly propagated.
//
// - check: A predicate function that examines the Result[A] and returns true if the
// operation should be retried, or false if it should stop. This allows you to
// distinguish between retryable failures (e.g., network timeouts) and permanent
// failures (e.g., invalid input). Note that context cancellation errors will
// automatically stop retrying regardless of this function's return value.
//
// Returns:
//
// A ReaderIOResult[A] that, when executed with a context, will perform the retry
// logic with context cancellation support and return the final result.
//
// Type Parameters:
// - A: The type of the success value
//
// Context Cancellation:
//
// The retry mechanism respects context cancellation in two ways:
// 1. During retry delays: If the context is cancelled while waiting between retries,
// the operation stops immediately and returns the context error.
// 2. During action execution: If the action itself checks the context and returns
// an error due to cancellation, the retry loop will stop (assuming the check
// function doesn't force a retry on context errors).
//
// Example:
//
// // Create a retry policy: exponential backoff with a cap, limited to 5 retries
// policy := M.Concat(
// retry.LimitRetries(5),
// retry.CapDelay(10*time.Second, retry.ExponentialBackoff(100*time.Millisecond)),
// )(retry.Monoid)
//
// // Action that fetches data, with retry status information
// fetchData := func(status retry.RetryStatus) ReaderIOResult[string] {
// return func(ctx context.Context) IOResult[string] {
// return func() Result[string] {
// // Check if context is cancelled
// if ctx.Err() != nil {
// return result.Left[string](ctx.Err())
// }
// // Simulate an HTTP request that might fail
// if status.IterNumber < 3 {
// return result.Left[string](fmt.Errorf("temporary error"))
// }
// return result.Of("success")
// }
// }
// }
//
// // Check function: retry on any error except context cancellation
// shouldRetry := func(r Result[string]) bool {
// return result.IsLeft(r) && !errors.Is(result.GetLeft(r), context.Canceled)
// }
//
// // Create the retrying computation
// retryingFetch := Retrying(policy, fetchData, shouldRetry)
//
// // Execute with a cancellable context
// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
// defer cancel()
// ioResult := retryingFetch(ctx)
// finalResult := ioResult()
//
// See also:
// - retry.RetryPolicy for available retry policies
// - retry.RetryStatus for information passed to the action
// - context.Context for context cancellation semantics
//
//go:inline
func Retrying[A any](
policy R.RetryPolicy,
action Kleisli[R.RetryStatus, A],
check func(Result[A]) bool,
) ReaderIOResult[A] {
// delayWithCancel implements a context-aware delay mechanism for retry operations.
// It creates a timeout context that will be cancelled when either:
// 1. The delay duration expires (normal case), or
// 2. The parent context is cancelled (early termination)
//
// The function waits on timeoutCtx.Done(), which will be signaled in either case:
// - If the delay expires, timeoutCtx is cancelled by the timeout
// - If the parent ctx is cancelled, timeoutCtx inherits the cancellation
//
// After the wait completes, we dispatch to the next action by calling ri(ctx)().
// This works correctly because the action is wrapped in WithContextK, which handles
// context cancellation by checking ctx.Err() and returning an appropriate error
// (context.Canceled or context.DeadlineExceeded) when the context is cancelled.
//
// This design ensures that:
// - Retry delays respect context cancellation and terminate immediately
// - The cancellation error propagates correctly through the retry chain
// - No unnecessary delays occur when the context is already cancelled
delayWithCancel := func(delay time.Duration) RIO.Operator[R.RetryStatus, R.RetryStatus] {
return func(ri ReaderIO[R.RetryStatus]) ReaderIO[R.RetryStatus] {
return func(ctx context.Context) IO[R.RetryStatus] {
return func() R.RetryStatus {
// Create a timeout context that will be cancelled when either:
// - The delay duration expires, or
// - The parent context is cancelled
timeoutCtx, cancelTimeout := context.WithTimeout(ctx, delay)
defer cancelTimeout()
// Wait for either the timeout or parent context cancellation
<-timeoutCtx.Done()
// Dispatch to the next action with the original context.
// WithContextK will handle context cancellation correctly.
return ri(ctx)()
}
}
}
}
// get an implementation for the types
return RG.Retrying(
RIO.Chain[Result[A], Result[A]],
RIO.Chain[R.RetryStatus, Result[A]],
RIO.Of[Result[A]],
RIO.Of[R.RetryStatus],
delayWithCancel,
policy,
WithContextK(action),
check,
)
}
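// A compact way to assemble the policy from the example above, using the R
// alias imported by this file (an illustrative sketch; fetchData and
// shouldRetry refer to the hypothetical functions in the example):
//
//	policy := R.Monoid.Concat(
//		R.LimitRetries(5),
//		R.CapDelay(10*time.Second, R.ExponentialBackoff(100*time.Millisecond)),
//	)
//	retryingFetch := Retrying(policy, fetchData, shouldRetry)
//	// retryingFetch(ctx)() runs fetchData at most 6 times (the initial attempt
//	// plus 5 retries) and stops early once check returns false or ctx is cancelled.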


@@ -0,0 +1,511 @@
// Copyright (c) 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/IBM/fp-go/v2/result"
R "github.com/IBM/fp-go/v2/retry"
"github.com/stretchr/testify/assert"
)
// Helper function to create a test retry policy
func testRetryPolicy() R.RetryPolicy {
return R.Monoid.Concat(
R.LimitRetries(5),
R.CapDelay(1*time.Second, R.ExponentialBackoff(10*time.Millisecond)),
)
}
// TestRetrying_SuccessOnFirstAttempt tests that Retrying succeeds immediately
// when the action succeeds on the first attempt.
func TestRetrying_SuccessOnFirstAttempt(t *testing.T) {
policy := testRetryPolicy()
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
return result.Of("success")
}
}
}
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
assert.Equal(t, result.Of("success"), res)
}
// TestRetrying_SuccessAfterRetries tests that Retrying eventually succeeds
// after a few failed attempts.
func TestRetrying_SuccessAfterRetries(t *testing.T) {
policy := testRetryPolicy()
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
// Fail on first 3 attempts, succeed on 4th
if status.IterNumber < 3 {
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
return result.Of(fmt.Sprintf("success on attempt %d", status.IterNumber))
}
}
}
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
assert.Equal(t, result.Of("success on attempt 3"), res)
}
// TestRetrying_ExhaustsRetries tests that Retrying stops after the retry limit
// is reached and returns the last error.
func TestRetrying_ExhaustsRetries(t *testing.T) {
policy := R.LimitRetries(3)
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
}
}
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
assert.True(t, result.IsLeft(res))
assert.Equal(t, result.Left[string](fmt.Errorf("attempt 3 failed")), res)
}
// TestRetrying_ActionChecksContextCancellation tests that actions can check
// the context and return early if it's cancelled.
func TestRetrying_ActionChecksContextCancellation(t *testing.T) {
policy := R.LimitRetries(10)
attemptCount := 0
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
attemptCount++
// Check context at the start of the action
if ctx.Err() != nil {
return result.Left[string](ctx.Err())
}
// Simulate work that might take time
time.Sleep(10 * time.Millisecond)
// Check context again after work
if ctx.Err() != nil {
return result.Left[string](ctx.Err())
}
// Always fail to trigger retries
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
}
}
check := func(r Result[string]) bool {
// Don't retry on context errors
val, err := result.Unwrap(r)
_ = val
if err != nil && (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)) {
return false
}
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
// Create a context that we'll cancel after a short time
ctx, cancel := context.WithCancel(t.Context())
// Start the retry operation in a goroutine
resultChan := make(chan Result[string], 1)
go func() {
res := retrying(ctx)()
resultChan <- res
}()
// Cancel the context after allowing a couple attempts
time.Sleep(50 * time.Millisecond)
cancel()
// Wait for the result
res := <-resultChan
// Should have stopped due to context cancellation
assert.True(t, result.IsLeft(res))
// Should have stopped early (not all 10 attempts)
assert.Less(t, attemptCount, 10, "Should stop retrying when action detects context cancellation")
// The error should be related to context cancellation or an early attempt
val, err := result.Unwrap(res)
_ = val
assert.Error(t, err)
}
// TestRetrying_ContextCancelledBeforeStart tests that if the context is already
// cancelled before starting, the operation fails immediately.
func TestRetrying_ContextCancelledBeforeStart(t *testing.T) {
policy := testRetryPolicy()
attemptCount := 0
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
attemptCount++
// Check context before doing work
if ctx.Err() != nil {
return result.Left[string](ctx.Err())
}
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
}
}
check := func(r Result[string]) bool {
// Don't retry on context errors
val, err := result.Unwrap(r)
_ = val
if err != nil && errors.Is(err, context.Canceled) {
return false
}
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
// Create an already-cancelled context
ctx, cancel := context.WithCancel(t.Context())
cancel()
res := retrying(ctx)()
assert.True(t, result.IsLeft(res))
val, err := result.Unwrap(res)
_ = val
assert.True(t, errors.Is(err, context.Canceled))
// Should have attempted at most once
assert.LessOrEqual(t, attemptCount, 1)
}
// TestRetrying_ContextTimeoutInAction tests that actions respect context deadlines.
func TestRetrying_ContextTimeoutInAction(t *testing.T) {
policy := R.LimitRetries(10)
attemptCount := 0
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
attemptCount++
// Check context before doing work
if ctx.Err() != nil {
return result.Left[string](ctx.Err())
}
// Simulate some work
time.Sleep(50 * time.Millisecond)
// Check context after work
if ctx.Err() != nil {
return result.Left[string](ctx.Err())
}
// Always fail to trigger retries
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
}
}
check := func(r Result[string]) bool {
// Don't retry on context errors
val, err := result.Unwrap(r)
_ = val
if err != nil && (errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)) {
return false
}
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
// Create a context with a short timeout
ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
defer cancel()
startTime := time.Now()
res := retrying(ctx)()
elapsed := time.Since(startTime)
assert.True(t, result.IsLeft(res))
// Should have stopped before completing all 10 retries
assert.Less(t, attemptCount, 10, "Should stop retrying when action detects context timeout")
// Should have stopped around the timeout duration
assert.Less(t, elapsed, 500*time.Millisecond, "Should stop soon after timeout")
}
// TestRetrying_CheckFunctionStopsRetry tests that the check function can
// stop retrying even when errors occur.
func TestRetrying_CheckFunctionStopsRetry(t *testing.T) {
policy := testRetryPolicy()
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
if status.IterNumber == 0 {
return result.Left[string](fmt.Errorf("retryable error"))
}
return result.Left[string](fmt.Errorf("permanent error"))
}
}
}
// Only retry on "retryable error"
check := func(r Result[string]) bool {
return result.IsLeft(r) && result.Fold(
func(err error) bool { return err.Error() == "retryable error" },
func(string) bool { return false },
)(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
assert.Equal(t, result.Left[string](fmt.Errorf("permanent error")), res)
}
// TestRetrying_ExponentialBackoff tests that exponential backoff is applied.
func TestRetrying_ExponentialBackoff(t *testing.T) {
// Use a policy with measurable delays
policy := R.Monoid.Concat(
R.LimitRetries(3),
R.ExponentialBackoff(50*time.Millisecond),
)
startTime := time.Now()
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
if status.IterNumber < 2 {
return result.Left[string](fmt.Errorf("retry"))
}
return result.Of("success")
}
}
}
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
elapsed := time.Since(startTime)
assert.Equal(t, result.Of("success"), res)
// With exponential backoff starting at 50ms:
// Iteration 0: no delay
// Iteration 1: 50ms delay
// Iteration 2: 100ms delay
// Total should be at least 150ms
assert.GreaterOrEqual(t, elapsed, 150*time.Millisecond)
}
// TestRetrying_ContextValuePropagation tests that context values are properly
// propagated through the retry mechanism.
func TestRetrying_ContextValuePropagation(t *testing.T) {
policy := R.LimitRetries(2)
type contextKey string
const requestIDKey contextKey = "requestID"
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
// Extract value from context
requestID, ok := ctx.Value(requestIDKey).(string)
if !ok {
return result.Left[string](fmt.Errorf("missing request ID"))
}
if status.IterNumber < 1 {
return result.Left[string](fmt.Errorf("retry needed"))
}
return result.Of(fmt.Sprintf("processed request %s", requestID))
}
}
}
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
// Create context with a value
ctx := context.WithValue(t.Context(), requestIDKey, "12345")
res := retrying(ctx)()
assert.Equal(t, result.Of("processed request 12345"), res)
}
// TestRetrying_RetryStatusProgression tests that the RetryStatus is properly
// updated on each iteration.
func TestRetrying_RetryStatusProgression(t *testing.T) {
policy := testRetryPolicy()
var iterations []uint
action := func(status R.RetryStatus) ReaderIOResult[int] {
return func(ctx context.Context) IOResult[int] {
return func() Result[int] {
iterations = append(iterations, status.IterNumber)
if status.IterNumber < 3 {
return result.Left[int](fmt.Errorf("retry"))
}
return result.Of(int(status.IterNumber))
}
}
}
check := func(r Result[int]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
ctx := t.Context()
res := retrying(ctx)()
assert.Equal(t, result.Of(3), res)
// Should have attempted iterations 0, 1, 2, 3
assert.Equal(t, []uint{0, 1, 2, 3}, iterations)
}
// TestRetrying_ContextCancelledDuringDelay tests that the retry operation
// stops immediately when the context is cancelled during a retry delay,
// even if there are still retries remaining according to the policy.
func TestRetrying_ContextCancelledDuringDelay(t *testing.T) {
// Use a policy with significant delays to ensure we can cancel during the delay
policy := R.Monoid.Concat(
R.LimitRetries(10),
R.ConstantDelay(200*time.Millisecond),
)
attemptCount := 0
action := func(status R.RetryStatus) ReaderIOResult[string] {
return func(ctx context.Context) IOResult[string] {
return func() Result[string] {
attemptCount++
// Always fail to trigger retries
return result.Left[string](fmt.Errorf("attempt %d failed", status.IterNumber))
}
}
}
// Always retry on errors (don't check for context cancellation in check function)
check := func(r Result[string]) bool {
return result.IsLeft(r)
}
retrying := Retrying(policy, action, check)
// Create a context that we'll cancel during the retry delay
ctx, cancel := context.WithCancel(t.Context())
// Start the retry operation in a goroutine
resultChan := make(chan Result[string], 1)
startTime := time.Now()
go func() {
res := retrying(ctx)()
resultChan <- res
}()
// Wait for the first attempt to complete and the delay to start
time.Sleep(50 * time.Millisecond)
// Cancel the context during the retry delay
cancel()
// Wait for the result
res := <-resultChan
elapsed := time.Since(startTime)
// Should have stopped due to context cancellation
assert.True(t, result.IsLeft(res))
// Should have attempted only once or twice (not all 10 attempts)
// because the context was cancelled during the delay
assert.LessOrEqual(t, attemptCount, 2, "Should stop retrying when context is cancelled during delay")
// Should have stopped quickly after cancellation, not waiting for all delays
// With 10 retries and 200ms delays, it would take ~2 seconds without cancellation
// With cancellation during first delay, it should complete in well under 500ms
assert.Less(t, elapsed, 500*time.Millisecond, "Should stop immediately when context is cancelled during delay")
// When context is cancelled during the delay, the retry mechanism
// detects the cancellation and returns a context error
val, err := result.Unwrap(res)
_ = val
assert.Error(t, err)
// The error should be a context cancellation error since cancellation
// happened during the delay between retries
assert.True(t, errors.Is(err, context.Canceled), "Should return context.Canceled when cancelled during delay")
}


@@ -16,8 +16,9 @@
package readerioresult
import (
+"github.com/IBM/fp-go/v2/array"
"github.com/IBM/fp-go/v2/function"
-"github.com/IBM/fp-go/v2/internal/array"
+F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/record"
)
@@ -29,12 +30,12 @@ import (
//
// Returns a function that transforms an array into a ReaderIOResult of an array.
func TraverseArray[A, B any](f Kleisli[A, B]) Kleisli[[]A, []B] {
-return array.Traverse[[]A](
+return array.Traverse(
Of[[]B],
Map[[]B, func(B) []B],
Ap[[]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
@@ -46,7 +47,7 @@ func TraverseArray[A, B any](f Kleisli[A, B]) Kleisli[[]A, []B] {
//
// Returns a function that transforms an array into a ReaderIOResult of an array.
func TraverseArrayWithIndex[A, B any](f func(int, A) ReaderIOResult[B]) Kleisli[[]A, []B] {
-return array.TraverseWithIndex[[]A](
+return array.TraverseWithIndex(
Of[[]B],
Map[[]B, func(B) []B],
Ap[[]B, B],
@@ -78,7 +79,7 @@ func TraverseRecord[K comparable, A, B any](f Kleisli[A, B]) Kleisli[map[K]A, ma
Map[map[K]B, func(B) map[K]B],
Ap[map[K]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
@@ -123,7 +124,7 @@ func MonadTraverseArraySeq[A, B any](as []A, f Kleisli[A, B]) ReaderIOResult[[]B
Map[[]B, func(B) []B],
ApSeq[[]B, B],
as,
-f,
+F.Flow2(f, WithContext),
)
}
@@ -135,22 +136,20 @@ func MonadTraverseArraySeq[A, B any](as []A, f Kleisli[A, B]) ReaderIOResult[[]B
//
// Returns a function that transforms an array into a ReaderIOResult of an array.
func TraverseArraySeq[A, B any](f Kleisli[A, B]) Kleisli[[]A, []B] {
-return array.Traverse[[]A](
+return array.Traverse(
Of[[]B],
Map[[]B, func(B) []B],
ApSeq[[]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
// TraverseArrayWithIndexSeq transforms an array [[]A] into [[]ReaderIOResult[B]] and then resolves that into a [ReaderIOResult[[]B]]
func TraverseArrayWithIndexSeq[A, B any](f func(int, A) ReaderIOResult[B]) Kleisli[[]A, []B] {
-return array.TraverseWithIndex[[]A](
+return array.TraverseWithIndex(
Of[[]B],
Map[[]B, func(B) []B],
ApSeq[[]B, B],
f,
)
}
@@ -173,7 +172,7 @@ func MonadTraverseRecordSeq[K comparable, A, B any](as map[K]A, f Kleisli[A, B])
Map[map[K]B, func(B) map[K]B],
ApSeq[map[K]B, B],
as,
-f,
+F.Flow2(f, WithContext),
)
}
@@ -184,7 +183,7 @@ func TraverseRecordSeq[K comparable, A, B any](f Kleisli[A, B]) Kleisli[map[K]A,
Map[map[K]B, func(B) map[K]B],
ApSeq[map[K]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
@@ -218,7 +217,7 @@ func MonadTraverseArrayPar[A, B any](as []A, f Kleisli[A, B]) ReaderIOResult[[]B
Map[[]B, func(B) []B],
ApPar[[]B, B],
as,
-f,
+F.Flow2(f, WithContext),
)
}
@@ -230,22 +229,20 @@ func MonadTraverseArrayPar[A, B any](as []A, f Kleisli[A, B]) ReaderIOResult[[]B
//
// Returns a function that transforms an array into a ReaderIOResult of an array.
func TraverseArrayPar[A, B any](f Kleisli[A, B]) Kleisli[[]A, []B] {
-return array.Traverse[[]A](
+return array.Traverse(
Of[[]B],
Map[[]B, func(B) []B],
ApPar[[]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
// TraverseArrayWithIndexPar transforms an array [[]A] into [[]ReaderIOResult[B]] and then resolves that into a [ReaderIOResult[[]B]]
func TraverseArrayWithIndexPar[A, B any](f func(int, A) ReaderIOResult[B]) Kleisli[[]A, []B] {
-return array.TraverseWithIndex[[]A](
+return array.TraverseWithIndex(
Of[[]B],
Map[[]B, func(B) []B],
ApPar[[]B, B],
f,
)
}
@@ -268,7 +265,7 @@ func TraverseRecordPar[K comparable, A, B any](f Kleisli[A, B]) Kleisli[map[K]A,
Map[map[K]B, func(B) map[K]B],
ApPar[map[K]B, B],
-f,
+F.Flow2(f, WithContext),
)
}
@@ -290,7 +287,7 @@ func MonadTraverseRecordPar[K comparable, A, B any](as map[K]A, f Kleisli[A, B])
Map[map[K]B, func(B) map[K]B],
ApPar[map[K]B, B],
as,
-f,
+F.Flow2(f, WithContext),
)
}
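// The function below is an illustrative sketch (exampleTraverse and doubleLater
// are hypothetical names, not part of the package): since every per-element
// Kleisli above is composed with WithContext via F.Flow2(f, WithContext), each
// element re-checks the context before its effect runs, so elements whose
// effects have not started yet short-circuit with the cancellation cause.
func exampleTraverse() ReaderIOResult[[]int] {
	// doubleLater is a trivial per-element effect.
	doubleLater := func(n int) ReaderIOResult[int] {
		return Of(n * 2)
	}
	// With a live context this evaluates to Right([2, 4, 6]); with a cancelled
	// context the per-element WithContext check yields the cancellation error.
	return TraverseArray(doubleLater)([]int{1, 2, 3})
}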


@@ -18,12 +18,16 @@ package readerioresult
import (
"context"
+"github.com/IBM/fp-go/v2/consumer"
"github.com/IBM/fp-go/v2/context/ioresult"
"github.com/IBM/fp-go/v2/context/readerresult"
"github.com/IBM/fp-go/v2/either"
+"github.com/IBM/fp-go/v2/endomorphism"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioeither"
"github.com/IBM/fp-go/v2/lazy"
+"github.com/IBM/fp-go/v2/optics/lens"
+"github.com/IBM/fp-go/v2/optics/prism"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readereither"
@@ -31,6 +35,7 @@ import (
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/readeroption"
"github.com/IBM/fp-go/v2/result"
+"github.com/IBM/fp-go/v2/tailrec"
)
type (
@@ -126,4 +131,13 @@
ReaderResult[A any] = readerresult.ReaderResult[A]
ReaderEither[R, E, A any] = readereither.ReaderEither[R, E, A]
ReaderOption[R, A any] = readeroption.ReaderOption[R, A]
+Endomorphism[A any] = endomorphism.Endomorphism[A]
+Consumer[A any] = consumer.Consumer[A]
+Prism[S, T any] = prism.Prism[S, T]
+Lens[S, T any] = lens.Lens[S, T]
+Trampoline[B, L any] = tailrec.Trampoline[B, L]
)


@@ -15,11 +15,14 @@
package readerresult
-import "github.com/IBM/fp-go/v2/readereither"
+import (
+F "github.com/IBM/fp-go/v2/function"
+"github.com/IBM/fp-go/v2/readereither"
+)
// TraverseArray transforms an array
func TraverseArray[A, B any](f Kleisli[A, B]) Kleisli[[]A, []B] {
-return readereither.TraverseArray(f)
+return readereither.TraverseArray(F.Flow2(f, WithContext))
}
// TraverseArrayWithIndex transforms an array


@@ -17,7 +17,6 @@ package readerresult
import (
F "github.com/IBM/fp-go/v2/function"
-L "github.com/IBM/fp-go/v2/optics/lens"
G "github.com/IBM/fp-go/v2/readereither/generic"
)
@@ -31,16 +30,26 @@ import (
// TenantID string
// }
// result := readereither.Do(State{})
+//
+//go:inline
func Do[S any](
empty S,
) ReaderResult[S] {
return G.Do[ReaderResult[S]](empty)
}
-// Bind attaches the result of a computation to a context [S1] to produce a context [S2].
+// Bind attaches the result of an EFFECTFUL computation to a context [S1] to produce a context [S2].
// This enables sequential composition where each step can depend on the results of previous steps
// and access the context.Context from the environment.
//
+// IMPORTANT: Bind is for EFFECTFUL FUNCTIONS that depend on context.Context.
+// The function parameter takes state and returns a ReaderResult[T], which is effectful because
+// it depends on context.Context (can be cancelled, has deadlines, carries values).
+//
+// For PURE FUNCTIONS (side-effect free), use:
+// - BindResultK: For pure functions with errors (State -> (Value, error))
+// - Let: For pure functions without errors (State -> Value)
+//
// The setter function takes the result of the computation and returns a function that
// updates the context from S1 to S2.
//
@@ -78,14 +87,27 @@ func Do[S any](
// },
// ),
// )
+//
+//go:inline
func Bind[S1, S2, T any](
setter func(T) func(S1) S2,
f Kleisli[S1, T],
) Kleisli[ReaderResult[S1], S2] {
-return G.Bind[ReaderResult[S1], ReaderResult[S2]](setter, f)
+return G.Bind[ReaderResult[S1], ReaderResult[S2]](setter, F.Flow2(f, WithContext))
}
-// Let attaches the result of a computation to a context [S1] to produce a context [S2]
+// Let attaches the result of a PURE computation to a context [S1] to produce a context [S2].
+//
+// IMPORTANT: Let is for PURE FUNCTIONS (side-effect free) that don't depend on context.Context.
+// The function parameter takes state and returns a value directly, with no errors or effects.
+//
+// For EFFECTFUL FUNCTIONS (that need context.Context), use:
+// - Bind: For effectful ReaderResult computations (State -> ReaderResult[Value])
+//
+// For PURE FUNCTIONS with error handling, use:
+// - BindResultK: For pure functions with errors (State -> (Value, error))
+//
+//go:inline
func Let[S1, S2, T any](
setter func(T) func(S1) S2,
f func(S1) T,
@@ -93,7 +115,10 @@ func Let[S1, S2, T any](
return G.Let[ReaderResult[S1], ReaderResult[S2]](setter, f)
}
-// LetTo attaches the a value to a context [S1] to produce a context [S2]
+// LetTo attaches a constant value to a context [S1] to produce a context [S2].
+// This is a PURE operation (side-effect free) that simply sets a field to a constant value.
+//
+//go:inline
func LetTo[S1, S2, T any](
setter func(T) func(S1) S2,
b T,
@@ -102,15 +127,27 @@ func LetTo[S1, S2, T any](
}
// BindTo initializes a new state [S1] from a value [T]
+//
+//go:inline
func BindTo[S1, T any](
setter func(T) S1,
-) Kleisli[ReaderResult[T], S1] {
+) Operator[T, S1] {
return G.BindTo[ReaderResult[S1], ReaderResult[T]](setter)
}
+//go:inline
+func BindToP[S1, T any](
+setter Prism[S1, T],
+) Operator[T, S1] {
+return BindTo(setter.ReverseGet)
+}
// ApS attaches a value to a context [S1] to produce a context [S2] by considering
// the context and the value concurrently (using Applicative rather than Monad).
-// This allows independent computations to be combined without one depending on the result of the other.
+// This allows independent EFFECTFUL computations to be combined without one depending on the result of the other.
+//
+// IMPORTANT: ApS is for EFFECTFUL FUNCTIONS that depend on context.Context.
+// The ReaderResult parameter is effectful because it depends on context.Context.
//
// Unlike Bind, which sequences operations, ApS can be used when operations are independent
// and can conceptually run in parallel.
@@ -145,6 +182,8 @@ func BindTo[S1, T any](
// getTenantID,
// ),
// )
+//
+//go:inline
func ApS[S1, S2, T any](
setter func(T) func(S1) S2,
fa ReaderResult[T],
@@ -183,17 +222,24 @@ func ApS[S1, S2, T any](
// readereither.Do(Person{Name: "Alice", Age: 25}),
// readereither.ApSL(ageLens, getAge),
// )
+//
+//go:inline
func ApSL[S, T any](
-lens L.Lens[S, T],
+lens Lens[S, T],
fa ReaderResult[T],
) Kleisli[ReaderResult[S], S] {
return ApS(lens.Set, fa)
}
// BindL is a variant of Bind that uses a lens to focus on a specific field in the state.
-// It combines the lens-based field access with monadic composition, allowing you to:
+// It combines the lens-based field access with monadic composition for EFFECTFUL computations.
+//
+// IMPORTANT: BindL is for EFFECTFUL FUNCTIONS that depend on context.Context.
+// The function parameter returns a ReaderResult, which is effectful.
+//
+// It allows you to:
// 1. Extract a field value using the lens
-// 2. Use that value in a computation that may fail
+// 2. Use that value in an effectful computation that may fail
// 3. Update the field with the result
//
// Parameters:
@@ -227,15 +273,20 @@ func ApSL[S, T any](
// readereither.Of[error](Counter{Value: 42}),
// readereither.BindL(valueLens, increment),
// )
+//
+//go:inline
func BindL[S, T any](
-lens L.Lens[S, T],
+lens Lens[S, T],
f Kleisli[T, T],
) Kleisli[ReaderResult[S], S] {
-return Bind(lens.Set, F.Flow2(lens.Get, f))
+return Bind(lens.Set, F.Flow2(lens.Get, F.Flow2(f, WithContext)))
}
// LetL is a variant of Let that uses a lens to focus on a specific field in the state.
-// It applies a pure transformation to the focused field without any effects.
+// It applies a PURE transformation to the focused field without any effects.
+//
+// IMPORTANT: LetL is for PURE FUNCTIONS (side-effect free) that don't depend on context.Context.
+// The function parameter is a pure endomorphism (T -> T) with no errors or effects.
//
// Parameters:
// - lens: A lens that focuses on a field of type T within state S
@@ -262,15 +313,17 @@ func BindL[S, T any](
// readereither.LetL(valueLens, double),
// )
// // result when executed will be Right(Counter{Value: 42})
+//
+//go:inline
func LetL[S, T any](
-lens L.Lens[S, T],
+lens Lens[S, T],
-f func(T) T,
+f Endomorphism[T],
) Kleisli[ReaderResult[S], S] {
return Let(lens.Set, F.Flow2(lens.Get, f))
}
// LetToL is a variant of LetTo that uses a lens to focus on a specific field in the state.
-// It sets the focused field to a constant value.
+// It sets the focused field to a constant value. This is a PURE operation (side-effect free).
//
// Parameters:
// - lens: A lens that focuses on a field of type T within state S
@@ -296,8 +349,10 @@ func LetL[S, T any](
// readereither.LetToL(debugLens, false),
// )
// // result when executed will be Right(Config{Debug: false, Timeout: 30})
+//
+//go:inline
func LetToL[S, T any](
-lens L.Lens[S, T],
+lens Lens[S, T],
b T,
) Kleisli[ReaderResult[S], S] {
return LetTo(lens.Set, b)


@@ -19,14 +19,23 @@ import (
"context" "context"
E "github.com/IBM/fp-go/v2/either" E "github.com/IBM/fp-go/v2/either"
F "github.com/IBM/fp-go/v2/function"
) )
// withContext wraps an existing ReaderResult and performs a context check for cancellation before deletating // withContext wraps an existing ReaderResult and performs a context check for cancellation before deletating
func WithContext[A any](ma ReaderResult[A]) ReaderResult[A] { func WithContext[A any](ma ReaderResult[A]) ReaderResult[A] {
return func(ctx context.Context) E.Either[error, A] { return func(ctx context.Context) E.Either[error, A] {
if err := context.Cause(ctx); err != nil { if ctx.Err() != nil {
return E.Left[A](err) return E.Left[A](context.Cause(ctx))
} }
return ma(ctx) return ma(ctx)
} }
} }
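// WithContextK lifts a Kleisli arrow so that the ReaderResult it produces first
// runs the WithContext cancellation check before delegating to f.
//
// Illustrative sketch (findUser is a hypothetical Kleisli; Of is assumed to be
// the package's pure constructor):
//
//	findUser := func(id int) ReaderResult[string] {
//		return Of("found")
//	}
//	safeFind := WithContextK(findUser)
//	// safeFind(42)(ctx) returns Left(context.Cause(ctx)) when ctx is already
//	// cancelled; otherwise it behaves exactly like findUser(42)(ctx).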
//go:inline
func WithContextK[A, B any](f Kleisli[A, B]) Kleisli[A, B] {
return F.Flow2(
f,
WithContext,
)
}


@@ -0,0 +1,154 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerresult
import (
"context"
"github.com/IBM/fp-go/v2/reader"
RR "github.com/IBM/fp-go/v2/readerresult"
)
// SequenceReader swaps the order of environment parameters when the inner computation is a Reader.
//
// This function is specialized for the context.Context-based ReaderResult monad. It takes a
// ReaderResult that produces a Reader and returns a reader.Kleisli that produces Results.
// The context.Context is implicitly used as the outer environment type.
//
// Type Parameters:
// - R: The inner environment type (becomes outer after flip)
// - A: The success value type
//
// Parameters:
// - ma: A ReaderResult that takes context.Context and may produce a Reader[R, A]
//
// Returns:
// - A reader.Kleisli[context.Context, R, Result[A]], which is func(context.Context) func(R) Result[A]
//
// The function preserves error handling from the outer ReaderResult layer. If the outer
// computation fails, the error is propagated to the inner Result.
//
// Note: This is an inline wrapper around readerresult.SequenceReader, specialized for
// context.Context as the outer environment type.
//
// Example:
//
// type Database struct {
// ConnectionString string
// }
//
// // Original: takes context, may fail, produces Reader[Database, string]
// original := func(ctx context.Context) result.Result[reader.Reader[Database, string]] {
// if ctx.Err() != nil {
// return result.Error[reader.Reader[Database, string]](ctx.Err())
// }
// return result.Ok[error](func(db Database) string {
// return fmt.Sprintf("Query on %s", db.ConnectionString)
// })
// }
//
// // Sequenced: takes context first, then Database
// sequenced := SequenceReader(original)
//
// ctx := context.Background()
// db := Database{ConnectionString: "localhost:5432"}
//
// // Apply context first to get a function that takes database
// dbReader := sequenced(ctx)
// // Then apply database to get the final result
// result := dbReader(db)
// // result is Result[string]
//
// Use Cases:
// - Dependency injection: Flip parameter order to inject context first, then dependencies
// - Testing: Separate context handling from business logic for easier testing
// - Composition: Enable point-free style by fixing the context parameter first
//
//go:inline
func SequenceReader[R, A any](ma ReaderResult[Reader[R, A]]) reader.Kleisli[context.Context, R, Result[A]] {
return RR.SequenceReader(ma)
}
// TraverseReader transforms a value using a Reader function and swaps environment parameter order.
//
// This function combines mapping and parameter flipping in a single operation. It takes a
// Reader function (pure computation without error handling) and returns a function that:
// 1. Maps a ReaderResult[A] to ReaderResult[B] using the provided Reader function
// 2. Flips the parameter order so R comes before context.Context
//
// Type Parameters:
// - R: The inner environment type (becomes outer after flip)
// - A: The input value type
// - B: The output value type
//
// Parameters:
// - f: A reader.Kleisli[R, A, B], which is func(R) func(A) B - a pure Reader function
//
// Returns:
// - A function that takes ReaderResult[A] and returns Kleisli[R, B]
// - Kleisli[R, B] is func(R) ReaderResult[B], which is func(R) func(context.Context) Result[B]
//
// The function preserves error handling from the input ReaderResult. If the input computation
// fails, the error is propagated without applying the transformation function.
//
// Note: This is a wrapper around readerresult.TraverseReader, specialized for context.Context.
//
// Example:
//
// type Config struct {
// MaxRetries int
// }
//
// // A pure Reader function that depends on Config
// formatMessage := func(cfg Config) func(int) string {
// return func(value int) string {
// return fmt.Sprintf("Value: %d, MaxRetries: %d", value, cfg.MaxRetries)
// }
// }
//
// // Original computation that may fail
// computation := func(ctx context.Context) result.Result[int] {
// if ctx.Err() != nil {
// return result.Error[int](ctx.Err())
// }
// return result.Ok[error](42)
// }
//
// // Create a traversal that applies formatMessage and flips parameters
// traverse := TraverseReader[Config, int, string](formatMessage)
//
// // Apply to the computation
// flipped := traverse(computation)
//
// // Now we can provide Config first, then context
// cfg := Config{MaxRetries: 3}
// ctx := context.Background()
//
// result := flipped(cfg)(ctx)
// // result is Result[string] containing "Value: 42, MaxRetries: 3"
//
// Use Cases:
// - Dependency injection: Inject configuration/dependencies before context
// - Testing: Separate pure business logic from context handling
// - Composition: Build pipelines where dependencies are fixed before execution
// - Point-free style: Enable partial application by fixing dependencies first
//
//go:inline
func TraverseReader[R, A, B any](
f reader.Kleisli[R, A, B],
) func(ReaderResult[A]) Kleisli[R, B] {
return RR.TraverseReader[context.Context](f)
}

View File

@@ -0,0 +1,215 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package readerresult provides logging utilities for the ReaderResult monad,
// which combines the Reader monad (for dependency injection via context.Context)
// with the Result monad (for error handling).
//
// The logging functions in this package allow you to log Result values (both
// successes and errors) while preserving the functional composition style.
package readerresult
import (
"context"
"log/slog"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/logging"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/result"
)
// curriedLog creates a curried logging function that takes an slog.Attr and a context,
// then logs the attribute with the specified log level and message.
//
// This is an internal helper function used to create the logging pipeline in a
// point-free style. The currying allows for partial application in functional
// composition.
//
// Parameters:
// - logLevel: The slog.Level at which to log (e.g., LevelInfo, LevelError)
// - cb: A callback function that retrieves a logger from the context
// - message: The log message to display
//
// Returns:
// - A curried function that takes an slog.Attr, then a context, and performs logging
func curriedLog(
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
message string) func(slog.Attr) Reader[context.Context, struct{}] {
return F.Curry2(func(a slog.Attr, ctx context.Context) struct{} {
cb(ctx).LogAttrs(ctx, logLevel, message, a)
return struct{}{}
})
}
// SLogWithCallback creates a Kleisli arrow that logs a Result value using a custom
// logger callback and log level. The Result value is logged and then returned unchanged,
// making this function suitable for use in functional pipelines.
//
// This function logs both successful values and errors:
// - Success values are logged with the key "value"
// - Error values are logged with the key "error"
//
// The logging is performed as a side effect while preserving the Result value,
// allowing it to be used in the middle of a computation pipeline without
// interrupting the flow.
//
// Type Parameters:
// - A: The type of the success value in the Result
//
// Parameters:
// - logLevel: The slog.Level at which to log (e.g., LevelInfo, LevelDebug, LevelError)
// - cb: A callback function that retrieves a *slog.Logger from the context
// - message: The log message to display
//
// Returns:
// - A Kleisli arrow that takes a Result[A] and returns a ReaderResult[A]
// The returned ReaderResult, when executed with a context, logs the Result
// and returns it unchanged
//
// Example:
//
// type User struct {
// ID int
// Name string
// }
//
// // Custom logger callback
// getLogger := func(ctx context.Context) *slog.Logger {
// return slog.Default()
// }
//
// // Create a logging function for debug level
// logDebug := SLogWithCallback[User](slog.LevelDebug, getLogger, "User data")
//
// // Use in a pipeline
// ctx := context.Background()
// user := result.Of(User{ID: 123, Name: "Alice"})
// logged := logDebug(user)(ctx) // Logs: level=DEBUG msg="User data" value={ID:123 Name:Alice}
// // logged still contains the User value
//
// Example with error:
//
// err := errors.New("user not found")
// userResult := result.Left[User](err)
// logged := logDebug(userResult)(ctx) // Logs: level=DEBUG msg="User data" error="user not found"
// // logged still contains the error
func SLogWithCallback[A any](
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
message string) Kleisli[Result[A], A] {
return F.Pipe1(
F.Flow2(
result.ToSLogAttr[A](),
curriedLog(logLevel, cb, message),
),
reader.Chain(reader.Sequence(F.Flow2( // this flow is basically the `MapTo` function with side effects
reader.Of[struct{}, Result[A]],
reader.Map[context.Context, struct{}, Result[A]],
))),
)
}
// SLog creates a Kleisli arrow that logs a Result value at INFO level using the
// logger from the context. This is a convenience function that uses SLogWithCallback
// with default settings.
//
// The Result value is logged and then returned unchanged, making this function
// suitable for use in functional pipelines for debugging or monitoring purposes.
//
// This function logs both successful values and errors:
// - Success values are logged with the key "value"
// - Error values are logged with the key "error"
//
// Type Parameters:
// - A: The type of the success value in the Result
//
// Parameters:
// - message: The log message to display
//
// Returns:
// - A Kleisli arrow that takes a Result[A] and returns a ReaderResult[A]
// The returned ReaderResult, when executed with a context, logs the Result
// at INFO level and returns it unchanged
//
// Example - Logging a successful computation:
//
// ctx := context.Background()
//
// // Simple value logging
// res := result.Of(42)
// logged := SLog[int]("Processing number")(res)(ctx)
// // Logs: level=INFO msg="Processing number" value=42
// // logged == result.Of(42)
//
// Example - Logging in a pipeline:
//
// type User struct {
// ID int
// Name string
// }
//
// fetchUser := func(id int) result.Result[User] {
// return result.Of(User{ID: id, Name: "Alice"})
// }
//
// processUser := func(user User) result.Result[string] {
// return result.Of(fmt.Sprintf("Processed: %s", user.Name))
// }
//
// ctx := context.Background()
//
// // Log at each step
// userResult := fetchUser(123)
// logged1 := SLog[User]("Fetched user")(userResult)(ctx)
// // Logs: level=INFO msg="Fetched user" value={ID:123 Name:Alice}
//
// processed := result.Chain(processUser)(logged1)
// logged2 := SLog[string]("Processed user")(processed)(ctx)
// // Logs: level=INFO msg="Processed user" value="Processed: Alice"
//
// Example - Logging errors:
//
// err := errors.New("database connection failed")
// errResult := result.Left[User](err)
// logged := SLog[User]("Database operation")(errResult)(ctx)
// // Logs: level=INFO msg="Database operation" error="database connection failed"
// // logged still contains the error
//
// Example - Using with context logger:
//
// // Set up a custom logger in the context
// logger := slog.New(slog.NewJSONHandler(os.Stdout, nil))
// ctx := logging.WithLogger(logger)(context.Background())
//
// res := result.Of("important data")
// logged := SLog[string]("Critical operation")(res)(ctx)
// // Uses the logger from context to log the message
//
// Note: The function uses logging.GetLoggerFromContext to retrieve the logger,
// which falls back to the global logger if no logger is found in the context.
//
//go:inline
func SLog[A any](message string) Kleisli[Result[A], A] {
return SLogWithCallback[A](slog.LevelInfo, logging.GetLoggerFromContext, message)
}
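// TapSLog creates an Operator that logs the Result of a ReaderResult at INFO level
// as a side effect and returns the computation unchanged. It is the pipeline
// variant of SLog: where SLog takes a Result[A] directly, TapSLog plugs into an
// existing ReaderResult[A] via reader.Chain, so it can be dropped between pipeline
// steps for debugging or monitoring.
//
// Example (illustrative sketch, using only functions shown elsewhere in this package):
//
// fetch := func(ctx context.Context) result.Result[int] {
// return result.Ok[error](42)
// }
//
// pipeline := F.Pipe1(
// fetch,
// TapSLog[int]("fetched value"),
// )
//
// res := pipeline(context.Background())
// // Logs: level=INFO msg="fetched value" value=42
// // res == result.Ok[error](42)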
//go:inline
func TapSLog[A any](message string) Operator[A, A] {
return reader.Chain(SLog[A](message))
}

View File

@@ -0,0 +1,302 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerresult
import (
"bytes"
"context"
"errors"
"log/slog"
"testing"
"github.com/IBM/fp-go/v2/logging"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
// TestSLogLogsSuccessValue tests that SLog logs successful Result values
func TestSLogLogsSuccessValue(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
// Create a Result and log it
res1 := result.Of(42)
logged := SLog[int]("Result value")(res1)(ctx)
assert.Equal(t, result.Of(42), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Result value")
assert.Contains(t, logOutput, "value=42")
}
// TestSLogLogsErrorValue tests that SLog logs error Result values
func TestSLogLogsErrorValue(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
testErr := errors.New("test error")
// Create an error Result and log it
res1 := result.Left[int](testErr)
logged := SLog[int]("Result value")(res1)(ctx)
assert.Equal(t, res1, logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Result value")
assert.Contains(t, logOutput, "error")
assert.Contains(t, logOutput, "test error")
}
// TestSLogInPipeline tests SLog in a functional pipeline
func TestSLogInPipeline(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
// SLog takes a Result[A] and returns ReaderResult[A]
// So we need to start with a Result, apply SLog, then execute with context
res1 := result.Of(10)
logged := SLog[int]("Initial value")(res1)(ctx)
assert.Equal(t, result.Of(10), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Initial value")
assert.Contains(t, logOutput, "value=10")
}
// TestSLogWithContextLogger tests SLog using logger from context
func TestSLogWithContextLogger(t *testing.T) {
var buf bytes.Buffer
contextLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(context.Background())
res1 := result.Of("test value")
logged := SLog[string]("Context logger test")(res1)(ctx)
assert.Equal(t, result.Of("test value"), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Context logger test")
assert.Contains(t, logOutput, `value="test value"`)
}
// TestSLogDisabled tests that SLog respects logger level
func TestSLogDisabled(t *testing.T) {
var buf bytes.Buffer
// Create logger with level that disables info logs
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelError, // Only log errors
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
res1 := result.Of(42)
logged := SLog[int]("This should not be logged")(res1)(ctx)
assert.Equal(t, result.Of(42), logged)
// Should have no logs since level is ERROR
logOutput := buf.String()
assert.Empty(t, logOutput, "Should have no logs when logging is disabled")
}
// TestSLogWithStruct tests SLog with structured data
func TestSLogWithStruct(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
type User struct {
ID int
Name string
}
ctx := context.Background()
user := User{ID: 123, Name: "Alice"}
res1 := result.Of(user)
logged := SLog[User]("User data")(res1)(ctx)
assert.Equal(t, result.Of(user), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "User data")
assert.Contains(t, logOutput, "ID:123")
assert.Contains(t, logOutput, "Name:Alice")
}
// TestSLogWithCallbackCustomLevel tests SLogWithCallback with custom log level
func TestSLogWithCallbackCustomLevel(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelDebug,
}))
customCallback := func(ctx context.Context) *slog.Logger {
return logger
}
ctx := context.Background()
// Create a Result and log it with custom callback
res1 := result.Of(42)
logged := SLogWithCallback[int](slog.LevelDebug, customCallback, "Debug result")(res1)(ctx)
assert.Equal(t, result.Of(42), logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Debug result")
assert.Contains(t, logOutput, "value=42")
assert.Contains(t, logOutput, "level=DEBUG")
}
// TestSLogWithCallbackLogsError tests SLogWithCallback logs errors
func TestSLogWithCallbackLogsError(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelWarn,
}))
customCallback := func(ctx context.Context) *slog.Logger {
return logger
}
ctx := context.Background()
testErr := errors.New("warning error")
// Create an error Result and log it with custom callback
res1 := result.Left[int](testErr)
logged := SLogWithCallback[int](slog.LevelWarn, customCallback, "Warning result")(res1)(ctx)
assert.Equal(t, res1, logged)
logOutput := buf.String()
assert.Contains(t, logOutput, "Warning result")
assert.Contains(t, logOutput, "error")
assert.Contains(t, logOutput, "warning error")
assert.Contains(t, logOutput, "level=WARN")
}
// TestSLogChainedOperations tests SLog in chained operations
func TestSLogChainedOperations(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
// First log step 1
res1 := result.Of(5)
logged1 := SLog[int]("Step 1")(res1)(ctx)
// Then log step 2 with doubled value
res2 := result.Map(N.Mul(2))(logged1)
logged2 := SLog[int]("Step 2")(res2)(ctx)
assert.Equal(t, result.Of(10), logged2)
logOutput := buf.String()
assert.Contains(t, logOutput, "Step 1")
assert.Contains(t, logOutput, "value=5")
assert.Contains(t, logOutput, "Step 2")
assert.Contains(t, logOutput, "value=10")
}
// TestSLogPreservesError tests that SLog preserves error through the pipeline
func TestSLogPreservesError(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
testErr := errors.New("original error")
res1 := result.Left[int](testErr)
logged := SLog[int]("Logging error")(res1)(ctx)
// Apply map to verify error is preserved
res2 := result.Map(N.Mul(2))(logged)
assert.Equal(t, res1, res2)
logOutput := buf.String()
assert.Contains(t, logOutput, "Logging error")
assert.Contains(t, logOutput, "original error")
}
// TestSLogMultipleValues tests logging multiple different values
func TestSLogMultipleValues(t *testing.T) {
var buf bytes.Buffer
logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
Level: slog.LevelInfo,
}))
oldLogger := logging.SetLogger(logger)
defer logging.SetLogger(oldLogger)
ctx := context.Background()
// Test with different types
intRes := SLog[int]("Integer")(result.Of(42))(ctx)
assert.Equal(t, result.Of(42), intRes)
strRes := SLog[string]("String")(result.Of("hello"))(ctx)
assert.Equal(t, result.Of("hello"), strRes)
boolRes := SLog[bool]("Boolean")(result.Of(true))(ctx)
assert.Equal(t, result.Of(true), boolRes)
logOutput := buf.String()
assert.Contains(t, logOutput, "Integer")
assert.Contains(t, logOutput, "value=42")
assert.Contains(t, logOutput, "String")
assert.Contains(t, logOutput, "value=hello")
assert.Contains(t, logOutput, "Boolean")
assert.Contains(t, logOutput, "value=true")
}

View File

@@ -18,9 +18,17 @@ package readerresult
import (
"context"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/chain"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readereither"
)
func FromReader[A any](r Reader[context.Context, A]) ReaderResult[A] {
return readereither.FromReader[error](r)
}
func FromEither[A any](e Either[A]) ReaderResult[A] {
return readereither.FromEither[context.Context](e)
}
@@ -42,11 +50,11 @@ func Map[A, B any](f func(A) B) Operator[A, B] {
}
func MonadChain[A, B any](ma ReaderResult[A], f Kleisli[A, B]) ReaderResult[B] {
-return readereither.MonadChain(ma, f)
+return readereither.MonadChain(ma, F.Flow2(f, WithContext))
}
func Chain[A, B any](f Kleisli[A, B]) Operator[A, B] {
-return readereither.Chain(f)
+return readereither.Chain(F.Flow2(f, WithContext))
}
func Of[A any](a A) ReaderResult[A] {
@@ -66,7 +74,7 @@ func FromPredicate[A any](pred func(A) bool, onFalse func(A) error) Kleisli[A, A
}
func OrElse[A any](onLeft Kleisli[error, A]) Kleisli[ReaderResult[A], A] {
-return readereither.OrElse(onLeft)
+return readereither.OrElse(F.Flow2(onLeft, WithContext))
}
func Ask() ReaderResult[context.Context] {
@@ -81,7 +89,7 @@ func ChainEitherK[A, B any](f func(A) Either[B]) func(ma ReaderResult[A]) Reader
return readereither.ChainEitherK[context.Context](f)
}
-func ChainOptionK[A, B any](onNone func() error) func(func(A) Option[B]) Operator[A, B] {
+func ChainOptionK[A, B any](onNone func() error) func(option.Kleisli[A, B]) Operator[A, B] {
return readereither.ChainOptionK[context.Context, A, B](onNone)
}
@@ -97,3 +105,197 @@ func Flap[B, A any](a A) Operator[func(A) B, B] {
func Read[A any](r context.Context) func(ReaderResult[A]) Result[A] {
return readereither.Read[error, A](r)
}
// MonadMapTo executes a ReaderResult computation, discards its success value, and returns a constant value.
// This is the monadic version that takes both the ReaderResult and the constant value as parameters.
//
// IMPORTANT: ReaderResult represents a side-effectful computation because it depends on context.Context,
// which is effectful (can be cancelled, has deadlines, carries values). For this reason, MonadMapTo WILL
// execute the original ReaderResult to allow any side effects to occur, then discard the success result
// and return the constant value. If the original computation fails, the error is preserved.
//
// Type Parameters:
// - A: The success type of the first ReaderResult (will be discarded if successful)
// - B: The type of the constant value to return on success
//
// Parameters:
// - ma: The ReaderResult to execute (side effects will occur, success value discarded)
// - b: The constant value to return if ma succeeds
//
// Returns:
// - A ReaderResult that executes ma, preserves errors, but replaces success values with b
//
// Example:
//
// type Config struct { Counter int }
// increment := func(ctx context.Context) result.Result[int] {
// // Side effect: log the operation
// fmt.Println("incrementing")
// return result.Of(5)
// }
// r := readerresult.MonadMapTo(increment, "done")
// result := r(context.Background()) // Prints "incrementing", returns Right("done")
//
//go:inline
func MonadMapTo[A, B any](ma ReaderResult[A], b B) ReaderResult[B] {
return MonadMap(ma, reader.Of[A](b))
}
// MapTo creates an operator that executes a ReaderResult computation, discards its success value,
// and returns a constant value. This is the curried version where the constant value is provided first,
// returning a function that can be applied to any ReaderResult.
//
// IMPORTANT: ReaderResult represents a side-effectful computation because it depends on context.Context,
// which is effectful (can be cancelled, has deadlines, carries values). For this reason, MapTo WILL
// execute the input ReaderResult to allow any side effects to occur, then discard the success result
// and return the constant value. If the computation fails, the error is preserved.
//
// Type Parameters:
// - A: The success type of the input ReaderResult (will be discarded if successful)
// - B: The type of the constant value to return on success
//
// Parameters:
// - b: The constant value to return on success
//
// Returns:
// - An Operator that executes a ReaderResult[A], preserves errors, but replaces success with b
//
// Example:
//
// logStep := func(ctx context.Context) result.Result[int] {
// fmt.Println("step executed")
// return result.Of(42)
// }
// toDone := readerresult.MapTo[int, string]("done")
// pipeline := toDone(logStep)
// result := pipeline(context.Background()) // Prints "step executed", returns Right("done")
//
// Example - In a functional pipeline:
//
// step1 := func(ctx context.Context) result.Result[int] {
// fmt.Println("processing")
// return result.Of(1)
// }
// pipeline := F.Pipe1(
// step1,
// readerresult.MapTo[int, string]("complete"),
// )
// output := pipeline(context.Background()) // Prints "processing", returns Right("complete")
//
//go:inline
func MapTo[A, B any](b B) Operator[A, B] {
return Map(reader.Of[A](b))
}
// MonadChainTo sequences two ReaderResult computations where the second ignores the first's success value.
// This is the monadic version that takes both ReaderResults as parameters.
//
// IMPORTANT: ReaderResult represents a side-effectful computation because it depends on context.Context,
// which is effectful (can be cancelled, has deadlines, carries values). For this reason, MonadChainTo WILL
// execute the first ReaderResult to allow any side effects to occur, then discard the success result and
// execute the second ReaderResult with the same context. If the first computation fails, the error is
// returned immediately without executing the second computation.
//
// Type Parameters:
// - A: The success type of the first ReaderResult (will be discarded if successful)
// - B: The success type of the second ReaderResult
//
// Parameters:
// - ma: The first ReaderResult to execute (side effects will occur, success value discarded)
// - b: The second ReaderResult to execute if ma succeeds
//
// Returns:
// - A ReaderResult that executes ma, then b if ma succeeds, returning b's result
//
// Example:
//
// logStart := func(ctx context.Context) result.Result[int] {
// fmt.Println("starting")
// return result.Of(1)
// }
// logEnd := func(ctx context.Context) result.Result[string] {
// fmt.Println("ending")
// return result.Of("done")
// }
// r := readerresult.MonadChainTo(logStart, logEnd)
// result := r(context.Background()) // Prints "starting" then "ending", returns Right("done")
//
//go:inline
func MonadChainTo[A, B any](ma ReaderResult[A], b ReaderResult[B]) ReaderResult[B] {
return MonadChain(ma, reader.Of[A](b))
}
// ChainTo creates an operator that sequences two ReaderResult computations where the second ignores
// the first's success value. This is the curried version where the second ReaderResult is provided first,
// returning a function that can be applied to any first ReaderResult.
//
// IMPORTANT: ReaderResult represents a side-effectful computation because it depends on context.Context,
// which is effectful (can be cancelled, has deadlines, carries values). For this reason, ChainTo WILL
// execute the first ReaderResult to allow any side effects to occur, then discard the success result and
// execute the second ReaderResult with the same context. If the first computation fails, the error is
// returned immediately without executing the second computation.
//
// Type Parameters:
// - A: The success type of the first ReaderResult (will be discarded if successful)
// - B: The success type of the second ReaderResult
//
// Parameters:
// - b: The second ReaderResult to execute after the first succeeds
//
// Returns:
// - An Operator that executes the first ReaderResult, then b if successful
//
// Example:
//
// logEnd := func(ctx context.Context) result.Result[string] {
// fmt.Println("ending")
// return result.Of("done")
// }
// thenLogEnd := readerresult.ChainTo[int, string](logEnd)
//
// logStart := func(ctx context.Context) result.Result[int] {
// fmt.Println("starting")
// return result.Of(1)
// }
// pipeline := thenLogEnd(logStart)
// result := pipeline(context.Background()) // Prints "starting" then "ending", returns Right("done")
//
// Example - In a functional pipeline:
//
// step1 := func(ctx context.Context) result.Result[int] {
// fmt.Println("step 1")
// return result.Of(1)
// }
// step2 := func(ctx context.Context) result.Result[string] {
// fmt.Println("step 2")
// return result.Of("complete")
// }
// pipeline := F.Pipe1(
// step1,
// readerresult.ChainTo[int, string](step2),
// )
// output := pipeline(context.Background()) // Prints "step 1" then "step 2", returns Right("complete")
//
//go:inline
func ChainTo[A, B any](b ReaderResult[B]) Operator[A, B] {
return Chain(reader.Of[A](b))
}
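// MonadChainFirst executes a ReaderResult, then runs the Kleisli function f on its
// success value purely for its effects: f's result is discarded and the original
// success value is returned. Errors from either computation are propagated. As with
// MonadChain, the computation produced by f is wrapped with WithContext so that it
// respects context cancellation. This is the monadic (uncurried) form of ChainFirst.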
//go:inline
func MonadChainFirst[A, B any](ma ReaderResult[A], f Kleisli[A, B]) ReaderResult[A] {
return chain.MonadChainFirst(
MonadChain,
MonadMap,
ma,
F.Flow2(f, WithContext),
)
}
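// ChainFirst creates an Operator that runs the Kleisli function f on the success
// value of a ReaderResult for its side effects (for example auditing or logging)
// and then returns the original success value unchanged. If the original computation
// or f fails, the error is propagated instead. This is the curried form of MonadChainFirst.
//
// Example (illustrative sketch; fetch and audit are purely illustrative names):
//
// fetch := func(ctx context.Context) result.Result[int] {
// return result.Of(42)
// }
// audit := func(n int) readerresult.ReaderResult[string] {
// return func(ctx context.Context) result.Result[string] {
// fmt.Println("audited:", n)
// return result.Of("ok")
// }
// }
// pipeline := F.Pipe1(
// fetch,
// readerresult.ChainFirst(audit),
// )
// output := pipeline(context.Background()) // Prints "audited: 42", returns Right(42)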
//go:inline
func ChainFirst[A, B any](f Kleisli[A, B]) Operator[A, A] {
return chain.ChainFirst(
Chain,
Map,
F.Flow2(f, WithContext),
)
}

View File

@@ -0,0 +1,315 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerresult
import (
"context"
"testing"
E "github.com/IBM/fp-go/v2/either"
F "github.com/IBM/fp-go/v2/function"
"github.com/stretchr/testify/assert"
)
func TestMapTo(t *testing.T) {
t.Run("executes original reader and returns constant value on success", func(t *testing.T) {
executed := false
originalReader := func(ctx context.Context) E.Either[error, int] {
executed = true
return E.Of[error](42)
}
// Apply MapTo operator
toDone := MapTo[int]("done")
resultReader := toDone(originalReader)
// Execute the resulting reader
result := resultReader(context.Background())
// Verify the constant value is returned
assert.Equal(t, E.Of[error]("done"), result)
// Verify the original reader WAS executed (side effect occurred)
assert.True(t, executed, "original reader should be executed to allow side effects")
})
t.Run("executes reader in functional pipeline", func(t *testing.T) {
executed := false
step1 := func(ctx context.Context) E.Either[error, int] {
executed = true
return E.Of[error](100)
}
pipeline := F.Pipe1(
step1,
MapTo[int]("complete"),
)
result := pipeline(context.Background())
assert.Equal(t, E.Of[error]("complete"), result)
assert.True(t, executed, "original reader should be executed in pipeline")
})
t.Run("executes reader with side effects", func(t *testing.T) {
sideEffectOccurred := false
readerWithSideEffect := func(ctx context.Context) E.Either[error, int] {
sideEffectOccurred = true
return E.Of[error](42)
}
resultReader := MapTo[int](true)(readerWithSideEffect)
result := resultReader(context.Background())
assert.Equal(t, E.Of[error](true), result)
assert.True(t, sideEffectOccurred, "side effect should occur")
})
t.Run("preserves errors from original reader", func(t *testing.T) {
executed := false
testErr := assert.AnError
failingReader := func(ctx context.Context) E.Either[error, int] {
executed = true
return E.Left[int](testErr)
}
resultReader := MapTo[int]("done")(failingReader)
result := resultReader(context.Background())
assert.Equal(t, E.Left[string](testErr), result)
assert.True(t, executed, "failing reader should still be executed")
})
}
func TestMonadMapTo(t *testing.T) {
t.Run("executes original reader and returns constant value on success", func(t *testing.T) {
executed := false
originalReader := func(ctx context.Context) E.Either[error, int] {
executed = true
return E.Of[error](42)
}
// Apply MonadMapTo
resultReader := MonadMapTo(originalReader, "done")
// Execute the resulting reader
result := resultReader(context.Background())
// Verify the constant value is returned
assert.Equal(t, E.Of[error]("done"), result)
// Verify the original reader WAS executed (side effect occurred)
assert.True(t, executed, "original reader should be executed to allow side effects")
})
t.Run("executes complex computation with side effects", func(t *testing.T) {
computationExecuted := false
complexReader := func(ctx context.Context) E.Either[error, string] {
computationExecuted = true
return E.Of[error]("complex result")
}
resultReader := MonadMapTo(complexReader, 42)
result := resultReader(context.Background())
assert.Equal(t, E.Of[error](42), result)
assert.True(t, computationExecuted, "complex computation should be executed")
})
t.Run("preserves errors from original reader", func(t *testing.T) {
executed := false
testErr := assert.AnError
failingReader := func(ctx context.Context) E.Either[error, []string] {
executed = true
return E.Left[[]string](testErr)
}
resultReader := MonadMapTo(failingReader, 99)
result := resultReader(context.Background())
assert.Equal(t, E.Left[int](testErr), result)
assert.True(t, executed, "failing reader should still be executed")
})
}
func TestChainTo(t *testing.T) {
t.Run("executes first reader then second reader on success", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
firstReader := func(ctx context.Context) E.Either[error, int] {
firstExecuted = true
return E.Of[error](42)
}
secondReader := func(ctx context.Context) E.Either[error, string] {
secondExecuted = true
return E.Of[error]("result")
}
// Apply ChainTo operator
thenSecond := ChainTo[int](secondReader)
resultReader := thenSecond(firstReader)
// Execute the resulting reader
result := resultReader(context.Background())
// Verify the second reader's result is returned
assert.Equal(t, E.Of[error]("result"), result)
// Verify both readers were executed
assert.True(t, firstExecuted, "first reader should be executed")
assert.True(t, secondExecuted, "second reader should be executed")
})
t.Run("executes both readers in functional pipeline", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
step1 := func(ctx context.Context) E.Either[error, int] {
firstExecuted = true
return E.Of[error](100)
}
step2 := func(ctx context.Context) E.Either[error, string] {
secondExecuted = true
return E.Of[error]("complete")
}
pipeline := F.Pipe1(
step1,
ChainTo[int](step2),
)
result := pipeline(context.Background())
assert.Equal(t, E.Of[error]("complete"), result)
assert.True(t, firstExecuted, "first reader should be executed in pipeline")
assert.True(t, secondExecuted, "second reader should be executed in pipeline")
})
t.Run("executes first reader with side effects", func(t *testing.T) {
sideEffectOccurred := false
readerWithSideEffect := func(ctx context.Context) E.Either[error, int] {
sideEffectOccurred = true
return E.Of[error](42)
}
secondReader := func(ctx context.Context) E.Either[error, bool] {
return E.Of[error](true)
}
resultReader := ChainTo[int](secondReader)(readerWithSideEffect)
result := resultReader(context.Background())
assert.Equal(t, E.Of[error](true), result)
assert.True(t, sideEffectOccurred, "side effect should occur in first reader")
})
t.Run("preserves error from first reader without executing second", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
testErr := assert.AnError
failingReader := func(ctx context.Context) E.Either[error, int] {
firstExecuted = true
return E.Left[int](testErr)
}
secondReader := func(ctx context.Context) E.Either[error, string] {
secondExecuted = true
return E.Of[error]("result")
}
resultReader := ChainTo[int](secondReader)(failingReader)
result := resultReader(context.Background())
assert.Equal(t, E.Left[string](testErr), result)
assert.True(t, firstExecuted, "first reader should be executed")
assert.False(t, secondExecuted, "second reader should not be executed on error")
})
}
func TestMonadChainTo(t *testing.T) {
t.Run("executes first reader then second reader on success", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
firstReader := func(ctx context.Context) E.Either[error, int] {
firstExecuted = true
return E.Of[error](42)
}
secondReader := func(ctx context.Context) E.Either[error, string] {
secondExecuted = true
return E.Of[error]("result")
}
// Apply MonadChainTo
resultReader := MonadChainTo(firstReader, secondReader)
// Execute the resulting reader
result := resultReader(context.Background())
// Verify the second reader's result is returned
assert.Equal(t, E.Of[error]("result"), result)
// Verify both readers were executed
assert.True(t, firstExecuted, "first reader should be executed")
assert.True(t, secondExecuted, "second reader should be executed")
})
t.Run("executes complex first computation with side effects", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
complexFirstReader := func(ctx context.Context) E.Either[error, []int] {
firstExecuted = true
return E.Of[error]([]int{1, 2, 3})
}
secondReader := func(ctx context.Context) E.Either[error, string] {
secondExecuted = true
return E.Of[error]("done")
}
resultReader := MonadChainTo(complexFirstReader, secondReader)
result := resultReader(context.Background())
assert.Equal(t, E.Of[error]("done"), result)
assert.True(t, firstExecuted, "complex first computation should be executed")
assert.True(t, secondExecuted, "second reader should be executed")
})
t.Run("preserves error from first reader without executing second", func(t *testing.T) {
firstExecuted := false
secondExecuted := false
testErr := assert.AnError
failingReader := func(ctx context.Context) E.Either[error, map[string]int] {
firstExecuted = true
return E.Left[map[string]int](testErr)
}
secondReader := func(ctx context.Context) E.Either[error, float64] {
secondExecuted = true
return E.Of[error](3.14)
}
resultReader := MonadChainTo(failingReader, secondReader)
result := resultReader(context.Background())
assert.Equal(t, E.Left[float64](testErr), result)
assert.True(t, firstExecuted, "first reader should be executed")
assert.False(t, secondExecuted, "second reader should not be executed on error")
})
}

View File

@@ -0,0 +1,105 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package readerresult implements a specialization of the Reader monad assuming a golang context as the context of the monad and a standard golang error
package readerresult
import (
"context"
"github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/result"
)
// TailRec implements tail-recursive computation for ReaderResult with context cancellation support.
//
// TailRec takes a Kleisli function that returns Trampoline[A, B] and converts it into a stack-safe,
// tail-recursive computation. The function repeatedly applies the Kleisli until it produces a Land value.
//
// The implementation includes a short-circuit mechanism that checks for context cancellation on each
// iteration. If the context is canceled (ctx.Err() != nil), the computation immediately returns a
// Left result containing the context's cause error, preventing unnecessary computation.
//
// Type Parameters:
// - A: The input type for the recursive step
// - B: The final result type
//
// Parameters:
// - f: A Kleisli function that takes an A and returns a ReaderResult containing Trampoline[A, B].
// When the result is Bounce(a), recursion continues with the new value 'a'.
// When the result is Land(b), recursion terminates with the final value 'b'.
//
// Returns:
// - A Kleisli function that performs the tail-recursive computation in a stack-safe manner.
//
// Behavior:
// - On each iteration, checks if the context has been canceled (short circuit)
// - If canceled, returns result.Left[B](context.Cause(ctx))
// - If the step returns Left[B](error), propagates the error
// - If the step returns Right[A](Bounce(a)), continues recursion with new value 'a'
// - If the step returns Right[A](Land(b)), terminates with success value 'b'
//
// Example - Factorial computation with context:
//
// type State struct {
// n int
// acc int
// }
//
// factorialStep := func(state State) ReaderResult[tailrec.Trampoline[State, int]] {
// return func(ctx context.Context) result.Result[tailrec.Trampoline[State, int]] {
// if state.n <= 0 {
// return result.Of(tailrec.Land[State](state.acc))
// }
// return result.Of(tailrec.Bounce[int](State{state.n - 1, state.acc * state.n}))
// }
// }
//
// factorial := TailRec(factorialStep)
// result := factorial(State{5, 1})(ctx) // Returns result.Of(120)
//
// Example - Context cancellation:
//
// ctx, cancel := context.WithCancel(context.Background())
// cancel() // Cancel immediately
//
// computation := TailRec(someStep)
// result := computation(initialValue)(ctx)
// // Returns result.Left[B](context.Cause(ctx)) without executing any steps
//
//go:inline
func TailRec[A, B any](f Kleisli[A, Trampoline[A, B]]) Kleisli[A, B] {
return func(a A) ReaderResult[B] {
initialReader := f(a)
return func(ctx context.Context) result.Result[B] {
rdr := initialReader
for {
// short circuit
if ctx.Err() != nil {
return result.Left[B](context.Cause(ctx))
}
current := rdr(ctx)
rec, e := either.Unwrap(current)
if either.IsLeft(current) {
return result.Left[B](e)
}
if rec.Landed {
return result.Of(rec.Land)
}
rdr = f(rec.Bounce)
}
}
}
}

View File

@@ -0,0 +1,498 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerresult
import (
"context"
"errors"
"fmt"
"testing"
"time"
A "github.com/IBM/fp-go/v2/array"
R "github.com/IBM/fp-go/v2/result"
TR "github.com/IBM/fp-go/v2/tailrec"
"github.com/stretchr/testify/assert"
)
// TestTailRecFactorial tests factorial computation with context
func TestTailRecFactorial(t *testing.T) {
type State struct {
n int
acc int
}
factorialStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.n <= 0 {
return R.Of(TR.Land[State](state.acc))
}
return R.Of(TR.Bounce[int](State{state.n - 1, state.acc * state.n}))
}
}
factorial := TailRec(factorialStep)
result := factorial(State{5, 1})(context.Background())
assert.Equal(t, R.Of(120), result)
}
// TestTailRecFibonacci tests Fibonacci computation
func TestTailRecFibonacci(t *testing.T) {
type State struct {
n int
prev int
curr int
}
fibStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.n <= 0 {
return R.Of(TR.Land[State](state.curr))
}
return R.Of(TR.Bounce[int](State{state.n - 1, state.curr, state.prev + state.curr}))
}
}
fib := TailRec(fibStep)
result := fib(State{10, 0, 1})(context.Background())
assert.Equal(t, R.Of(89), result) // fib(11) = 89 with fib(0) = 0 and fib(1) = 1
}
// TestTailRecCountdown tests countdown computation
func TestTailRecCountdown(t *testing.T) {
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(10)(context.Background())
assert.Equal(t, R.Of(0), result)
}
// TestTailRecImmediateTermination tests immediate termination (Right on first call)
func TestTailRecImmediateTermination(t *testing.T) {
immediateStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
return R.Of(TR.Land[int](n * 2))
}
}
immediate := TailRec(immediateStep)
result := immediate(42)(context.Background())
assert.Equal(t, R.Of(84), result)
}
// TestTailRecStackSafety tests that TailRec handles large iterations without stack overflow
func TestTailRecStackSafety(t *testing.T) {
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(10000)(context.Background())
assert.Equal(t, R.Of(0), result)
}
// TestTailRecSumList tests summing a list
func TestTailRecSumList(t *testing.T) {
type State struct {
list []int
sum int
}
sumStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if A.IsEmpty(state.list) {
return R.Of(TR.Land[State](state.sum))
}
return R.Of(TR.Bounce[int](State{state.list[1:], state.sum + state.list[0]}))
}
}
sumList := TailRec(sumStep)
result := sumList(State{[]int{1, 2, 3, 4, 5}, 0})(context.Background())
assert.Equal(t, R.Of(15), result)
}
// TestTailRecCollatzConjecture tests the Collatz conjecture
func TestTailRecCollatzConjecture(t *testing.T) {
collatzStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n <= 1 {
return R.Of(TR.Land[int](n))
}
if n%2 == 0 {
return R.Of(TR.Bounce[int](n / 2))
}
return R.Of(TR.Bounce[int](3*n + 1))
}
}
collatz := TailRec(collatzStep)
result := collatz(10)(context.Background())
assert.Equal(t, R.Of(1), result)
}
// TestTailRecGCD tests greatest common divisor
func TestTailRecGCD(t *testing.T) {
type State struct {
a int
b int
}
gcdStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.b == 0 {
return R.Of(TR.Land[State](state.a))
}
return R.Of(TR.Bounce[int](State{state.b, state.a % state.b}))
}
}
gcd := TailRec(gcdStep)
result := gcd(State{48, 18})(context.Background())
assert.Equal(t, R.Of(6), result)
}
// TestTailRecErrorPropagation tests that errors are properly propagated
func TestTailRecErrorPropagation(t *testing.T) {
expectedErr := errors.New("computation error")
errorStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n == 5 {
return R.Left[TR.Trampoline[int, int]](expectedErr)
}
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
computation := TailRec(errorStep)
result := computation(10)(context.Background())
assert.True(t, R.IsLeft(result))
_, err := R.Unwrap(result)
assert.Equal(t, expectedErr, err)
}
// TestTailRecContextCancellationImmediate tests short circuit when context is already canceled
func TestTailRecContextCancellationImmediate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
cancel() // Cancel immediately before execution
stepExecuted := false
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
stepExecuted = true
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(10)(ctx)
// Should short circuit without executing any steps
assert.False(t, stepExecuted, "Step should not be executed when context is already canceled")
assert.True(t, R.IsLeft(result))
_, err := R.Unwrap(result)
assert.Equal(t, context.Canceled, err)
}
// TestTailRecContextCancellationDuringExecution tests short circuit when context is canceled during execution
func TestTailRecContextCancellationDuringExecution(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
executionCount := 0
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
executionCount++
// Cancel after 3 iterations
if executionCount == 3 {
cancel()
}
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(100)(ctx)
// Should stop after cancellation
assert.True(t, R.IsLeft(result))
assert.LessOrEqual(t, executionCount, 4, "Should stop shortly after cancellation")
_, err := R.Unwrap(result)
assert.Equal(t, context.Canceled, err)
}
// TestTailRecContextWithTimeout tests behavior with timeout context
func TestTailRecContextWithTimeout(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
defer cancel()
executionCount := 0
slowStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
executionCount++
// Simulate slow computation
time.Sleep(20 * time.Millisecond)
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
computation := TailRec(slowStep)
result := computation(100)(ctx)
// Should timeout and return error
assert.True(t, R.IsLeft(result))
assert.Less(t, executionCount, 100, "Should not complete all iterations due to timeout")
_, err := R.Unwrap(result)
assert.Equal(t, context.DeadlineExceeded, err)
}
// TestTailRecContextWithCause tests that context.Cause is properly returned
func TestTailRecContextWithCause(t *testing.T) {
customErr := errors.New("custom cancellation reason")
ctx, cancel := context.WithCancelCause(context.Background())
cancel(customErr)
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(10)(ctx)
assert.True(t, R.IsLeft(result))
_, err := R.Unwrap(result)
assert.Equal(t, customErr, err)
}
// TestTailRecContextCancellationMultipleIterations tests that cancellation is checked on each iteration
func TestTailRecContextCancellationMultipleIterations(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
executionCount := 0
maxExecutions := 5
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
executionCount++
if executionCount == maxExecutions {
cancel()
}
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(1000)(ctx)
// Should detect cancellation on next iteration check
assert.True(t, R.IsLeft(result))
// Should stop within 1-2 iterations after cancellation
assert.LessOrEqual(t, executionCount, maxExecutions+2)
_, err := R.Unwrap(result)
assert.Equal(t, context.Canceled, err)
}
// TestTailRecContextNotCanceled tests normal execution when context is not canceled
func TestTailRecContextNotCanceled(t *testing.T) {
ctx := context.Background()
executionCount := 0
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
executionCount++
if n <= 0 {
return R.Of(TR.Land[int](n))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(10)(ctx)
assert.Equal(t, 11, executionCount) // 10, 9, 8, ..., 1, 0
assert.Equal(t, R.Of(0), result)
}
// TestTailRecPowerOfTwo tests computing power of 2
func TestTailRecPowerOfTwo(t *testing.T) {
type State struct {
exponent int
result int
target int
}
powerStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.exponent >= state.target {
return R.Of(TR.Land[State](state.result))
}
return R.Of(TR.Bounce[int](State{state.exponent + 1, state.result * 2, state.target}))
}
}
power := TailRec(powerStep)
result := power(State{0, 1, 10})(context.Background())
assert.Equal(t, R.Of(1024), result) // 2^10
}
// TestTailRecFindInRange tests finding a value in a range
func TestTailRecFindInRange(t *testing.T) {
type State struct {
current int
max int
target int
}
findStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.current >= state.max {
return R.Of(TR.Land[State](-1)) // Not found
}
if state.current == state.target {
return R.Of(TR.Land[State](state.current)) // Found
}
return R.Of(TR.Bounce[int](State{state.current + 1, state.max, state.target}))
}
}
find := TailRec(findStep)
result := find(State{0, 100, 42})(context.Background())
assert.Equal(t, R.Of(42), result)
}
// TestTailRecFindNotInRange tests finding a value not in range
func TestTailRecFindNotInRange(t *testing.T) {
type State struct {
current int
max int
target int
}
findStep := func(state State) ReaderResult[TR.Trampoline[State, int]] {
return func(ctx context.Context) Result[TR.Trampoline[State, int]] {
if state.current >= state.max {
return R.Of(TR.Land[State](-1)) // Not found
}
if state.current == state.target {
return R.Of(TR.Land[State](state.current)) // Found
}
return R.Of(TR.Bounce[int](State{state.current + 1, state.max, state.target}))
}
}
find := TailRec(findStep)
result := find(State{0, 100, 200})(context.Background())
assert.Equal(t, R.Of(-1), result)
}
// TestTailRecWithContextValue tests that context values are accessible
func TestTailRecWithContextValue(t *testing.T) {
type contextKey string
const multiplierKey contextKey = "multiplier"
ctx := context.WithValue(context.Background(), multiplierKey, 3)
countdownStep := func(n int) ReaderResult[TR.Trampoline[int, int]] {
return func(ctx context.Context) Result[TR.Trampoline[int, int]] {
if n <= 0 {
multiplier := ctx.Value(multiplierKey).(int)
return R.Of(TR.Land[int](n * multiplier))
}
return R.Of(TR.Bounce[int](n - 1))
}
}
countdown := TailRec(countdownStep)
result := countdown(5)(ctx)
assert.Equal(t, R.Of(0), result) // 0 * 3 = 0
}
// TestTailRecComplexState tests with complex state structure
func TestTailRecComplexState(t *testing.T) {
type ComplexState struct {
counter int
sum int
product int
completed bool
}
complexStep := func(state ComplexState) ReaderResult[TR.Trampoline[ComplexState, string]] {
return func(ctx context.Context) Result[TR.Trampoline[ComplexState, string]] {
if state.counter <= 0 || state.completed {
result := fmt.Sprintf("sum=%d, product=%d", state.sum, state.product)
return R.Of(TR.Land[ComplexState](result))
}
newState := ComplexState{
counter: state.counter - 1,
sum: state.sum + state.counter,
product: state.product * state.counter,
completed: state.counter == 1,
}
return R.Of(TR.Bounce[string](newState))
}
}
computation := TailRec(complexStep)
result := computation(ComplexState{5, 0, 1, false})(context.Background())
assert.Equal(t, R.Of("sum=15, product=120"), result)
}

View File

@@ -0,0 +1,84 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerresult
import (
"context"
"time"
RD "github.com/IBM/fp-go/v2/reader"
R "github.com/IBM/fp-go/v2/retry"
RG "github.com/IBM/fp-go/v2/retry/generic"
)
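// Retrying combines a retry policy, an effectful action and a check predicate into
// a single ReaderResult. The action receives the current R.RetryStatus for each
// attempt; check inspects the attempt's Result and decides whether another retry is
// needed; policy controls the delay between attempts and when to give up. Delays are
// context aware: cancelling the context ends the delay immediately and the
// cancellation error is surfaced through WithContextK.
//
// Example (illustrative sketch; the policy constructors ExponentialBackoff and
// LimitRetries and the policy Monoid are assumed from the retry package):
//
// policy := R.Monoid.Concat(R.ExponentialBackoff(10*time.Millisecond), R.LimitRetries(5))
//
// attempt := 0
// action := func(status R.RetryStatus) ReaderResult[string] {
// return func(ctx context.Context) Result[string] {
// attempt++
// if attempt < 3 {
// return result.Error[string](errors.New("transient failure"))
// }
// return result.Of("success")
// }
// }
//
// // retry as long as the attempt produced an error
// needsRetry := func(res Result[string]) bool {
// return result.IsLeft(res)
// }
//
// res := Retrying(policy, action, needsRetry)(context.Background())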
//go:inline
func Retrying[A any](
policy R.RetryPolicy,
action Kleisli[R.RetryStatus, A],
check func(Result[A]) bool,
) ReaderResult[A] {
// delayWithCancel implements a context-aware delay mechanism for retry operations.
// It creates a timeout context that will be cancelled when either:
// 1. The delay duration expires (normal case), or
// 2. The parent context is cancelled (early termination)
//
// The function waits on timeoutCtx.Done(), which will be signaled in either case:
// - If the delay expires, timeoutCtx is cancelled by the timeout
// - If the parent ctx is cancelled, timeoutCtx inherits the cancellation
//
// After the wait completes, we dispatch to the next action by calling ri(ctx).
// This works correctly because the action is wrapped in WithContextK, which handles
// context cancellation by checking ctx.Err() and returning an appropriate error
// (context.Canceled or context.DeadlineExceeded) when the context is cancelled.
//
// This design ensures that:
// - Retry delays respect context cancellation and terminate immediately
// - The cancellation error propagates correctly through the retry chain
// - No unnecessary delays occur when the context is already cancelled
delayWithCancel := func(delay time.Duration) RD.Operator[context.Context, R.RetryStatus, R.RetryStatus] {
return func(ri Reader[context.Context, R.RetryStatus]) Reader[context.Context, R.RetryStatus] {
return func(ctx context.Context) R.RetryStatus {
// Create a timeout context that will be cancelled when either:
// - The delay duration expires, or
// - The parent context is cancelled
timeoutCtx, cancelTimeout := context.WithTimeout(ctx, delay)
defer cancelTimeout()
// Wait for either the timeout or parent context cancellation
<-timeoutCtx.Done()
// Dispatch to the next action with the original context.
// WithContextK will handle context cancellation correctly.
return ri(ctx)
}
}
}
// get an implementation for the types
return RG.Retrying(
RD.Chain[context.Context, Result[A], Result[A]],
RD.Chain[context.Context, R.RetryStatus, Result[A]],
RD.Of[context.Context, Result[A]],
RD.Of[context.Context, R.RetryStatus],
delayWithCancel,
policy,
WithContextK(action),
check,
)
}

View File

@@ -13,26 +13,59 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// package readerresult implements a specialization of the Reader monad assuming a golang context as the context of the monad and a standard golang error
+// Package readerresult implements a specialization of the Reader monad assuming a golang context as the context of the monad and a standard golang error.
//
// # Pure vs Effectful Functions
//
// This package distinguishes between pure (side-effect free) and effectful (side-effectful) functions:
//
// EFFECTFUL FUNCTIONS (depend on context.Context):
// - ReaderResult[A]: func(context.Context) (A, error) - Effectful computation that needs context
// - These functions are effectful because context.Context is effectful (can be cancelled, has deadlines, carries values)
// - Use for: operations that need cancellation, timeouts, context values, or any context-dependent behavior
// - Examples: database queries, HTTP requests, operations that respect cancellation
//
// PURE FUNCTIONS (side-effect free):
// - func(State) (Value, error) - Pure computation that only depends on state, not context
// - func(State) Value - Pure transformation without errors
// - These functions are pure because they only read from their input state and don't depend on external context
// - Use for: parsing, validation, calculations, data transformations that don't need context
// - Examples: JSON parsing, input validation, mathematical computations
//
// The package provides different bind operations for each (a short sketch follows this list):
// - Bind: For effectful ReaderResult computations (State -> ReaderResult[Value])
// - BindResultK: For pure functions with errors (State -> (Value, error))
// - Let: For pure functions without errors (State -> Value)
// - BindReaderK: For context-dependent pure functions (State -> Reader[Context, Value])
// - BindEitherK: For pure Result/Either values (State -> Result[Value])
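//
// A minimal do-notation sketch combining an effectful and a pure step
// (State, User, setUser, setAdult and fetchUser are hypothetical helpers):
//
//	res := function.Pipe2(
//		Do(State{}),
//		Bind(setUser, func(s State) ReaderResult[User] { return fetchUser(s.id) }),
//		Let(setAdult, func(s State) bool { return s.user.Age >= 18 }),
//	)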
package readerresult
import (
"context"
"github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/endomorphism"
"github.com/IBM/fp-go/v2/optics/lens"
"github.com/IBM/fp-go/v2/optics/prism"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readereither"
"github.com/IBM/fp-go/v2/result"
"github.com/IBM/fp-go/v2/tailrec"
)
type (
Option[A any] = option.Option[A]
Either[A any] = either.Either[error, A]
Result[A any] = result.Result[A]
Reader[R, A any] = reader.Reader[R, A]
// ReaderResult is a specialization of the Reader monad for the typical golang scenario
ReaderResult[A any] = readereither.ReaderEither[context.Context, error, A]
Kleisli[A, B any] = reader.Reader[A, ReaderResult[B]]
Operator[A, B any] = Kleisli[ReaderResult[A], B]
Endomorphism[A any] = endomorphism.Endomorphism[A]
Prism[S, T any] = prism.Prism[S, T]
Lens[S, T any] = lens.Lens[S, T]
Trampoline[A, B any] = tailrec.Trampoline[A, B]
)


@@ -0,0 +1,209 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"github.com/IBM/fp-go/v2/function"
A "github.com/IBM/fp-go/v2/internal/apply"
C "github.com/IBM/fp-go/v2/internal/chain"
F "github.com/IBM/fp-go/v2/internal/functor"
)
// Do starts a do-notation chain for building computations in a fluent style.
// This is typically used with Bind, Let, and other combinators to compose
// stateful, context-dependent computations that can fail.
//
// Example:
//
// type State struct {
// name string
// age int
// }
// result := function.Pipe2(
// statereaderioresult.Do[AppState](State{}),
// statereaderioresult.Bind(...),
// statereaderioresult.Let(...),
// )
//
//go:inline
func Do[ST, A any](
empty A,
) StateReaderIOResult[ST, A] {
return Of[ST](empty)
}
// Bind executes a computation and binds its result to a field in the accumulator state.
// This is used in do-notation to sequence dependent computations.
//
// Example:
//
// result := function.Pipe1(
// statereaderioresult.Do[AppState](State{}),
// statereaderioresult.Bind(
// func(name string) func(State) State {
// return func(s State) State { return State{name: name, age: s.age} }
// },
// func(s State) statereaderioresult.StateReaderIOResult[AppState, string] {
// return statereaderioresult.Of[AppState]("John")
// },
// ),
// )
//
//go:inline
func Bind[ST, S1, S2, T any](
setter func(T) func(S1) S2,
f Kleisli[ST, S1, T],
) Operator[ST, S1, S2] {
return C.Bind(
Chain[ST, S1, S2],
Map[ST, T, S2],
setter,
f,
)
}
// Let computes a derived value and binds it to a field in the accumulator state.
// Unlike Bind, this does not execute a monadic computation, just a pure function.
//
// Example:
//
// result := function.Pipe1(
// statereaderioresult.Do[AppState](State{age: 25}),
// statereaderioresult.Let(
// func(isAdult bool) func(State) State {
// return func(s State) State { return State{age: s.age, isAdult: isAdult} }
// },
// func(s State) bool { return s.age >= 18 },
// ),
// )
//
//go:inline
func Let[ST, S1, S2, T any](
key func(T) func(S1) S2,
f func(S1) T,
) Operator[ST, S1, S2] {
return F.Let(
Map[ST, S1, S2],
key,
f,
)
}
// LetTo binds a constant value to a field in the accumulator state.
//
// Example:
//
// result := function.Pipe1(
// statereaderioresult.Do[AppState](State{}),
// statereaderioresult.LetTo(
// func(status string) func(State) State {
// return func(s State) State { return State{...s, status: status} }
// },
// "active",
// ),
// )
//
//go:inline
func LetTo[ST, S1, S2, T any](
key func(T) func(S1) S2,
b T,
) Operator[ST, S1, S2] {
return F.LetTo(
Map[ST, S1, S2],
key,
b,
)
}
// BindTo wraps a value in a simple constructor, typically used to start a do-notation chain
// after getting an initial value.
//
// Example:
//
// result := function.Pipe1(
// statereaderioresult.Of[AppState](42),
// statereaderioresult.BindTo[AppState](func(x int) State { return State{value: x} }),
// )
//
//go:inline
func BindTo[ST, S1, T any](
setter func(T) S1,
) Operator[ST, T, S1] {
return C.BindTo(
Map[ST, T, S1],
setter,
)
}
// ApS applies a computation in sequence and binds the result to a field.
// This is the applicative version of Bind.
//
//go:inline
func ApS[ST, S1, S2, T any](
setter func(T) func(S1) S2,
fa StateReaderIOResult[ST, T],
) Operator[ST, S1, S2] {
return A.ApS(
Ap[S2, ST, T],
Map[ST, S1, func(T) S2],
setter,
fa,
)
}
// ApSL is a lens-based variant of ApS for working with nested structures.
// It uses a lens to focus on a specific field in the state.
//
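// A minimal sketch (nameLens is a hypothetical Lens[State, string] focusing the
// name field of the accumulator):
//
//	withName := statereaderioresult.ApSL(nameLens, statereaderioresult.Of[AppState]("Alice"))
//	result := withName(statereaderioresult.Do[AppState](State{}))
//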
//go:inline
func ApSL[ST, S, T any](
lens Lens[S, T],
fa StateReaderIOResult[ST, T],
) Endomorphism[StateReaderIOResult[ST, S]] {
return ApS(lens.Set, fa)
}
// BindL is a lens-based variant of Bind for working with nested structures.
// It uses a lens to focus on a specific field in the state.
//
//go:inline
func BindL[ST, S, T any](
lens Lens[S, T],
f Kleisli[ST, T, T],
) Endomorphism[StateReaderIOResult[ST, S]] {
return Bind(lens.Set, function.Flow2(lens.Get, f))
}
// LetL is a lens-based variant of Let for working with nested structures.
// It uses a lens to focus on a specific field in the state.
//
//go:inline
func LetL[ST, S, T any](
lens Lens[S, T],
f Endomorphism[T],
) Endomorphism[StateReaderIOResult[ST, S]] {
return Let[ST](lens.Set, function.Flow2(lens.Get, f))
}
// LetToL is a lens-based variant of LetTo for working with nested structures.
// It uses a lens to focus on a specific field in the state.
//
//go:inline
func LetToL[ST, S, T any](
lens Lens[S, T],
b T,
) Endomorphism[StateReaderIOResult[ST, S]] {
return LetTo[ST](lens.Set, b)
}

View File

@@ -0,0 +1,147 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package statereaderioresult provides a functional programming abstraction that combines
// four powerful concepts: State, Reader, IO, and Result monads, specialized for Go's context.Context.
//
// # StateReaderIOResult
//
// StateReaderIOResult[S, A] represents a computation that:
// - Manages state of type S (State)
// - Depends on a [context.Context] (Reader)
// - Performs side effects (IO)
// - Can fail with an [error] or succeed with a value of type A (Result)
//
// This is a specialization of StateReaderIOEither with:
// - Context type fixed to [context.Context]
// - Error type fixed to [error]
//
// This is particularly useful for:
// - Stateful computations with dependency injection using Go contexts
// - Error handling in effectful computations with state
// - Composing operations that need access to context, manage state, and can fail
// - Working with Go's standard context patterns (cancellation, deadlines, values)
//
// # Core Operations
//
// Construction:
// - Of/Right: Create a successful computation with a value
// - Left: Create a failed computation with an error
// - FromState: Lift a State into StateReaderIOResult
// - FromIO: Lift an IO into StateReaderIOResult
// - FromResult: Lift a Result into StateReaderIOResult
// - FromIOResult: Lift an IOResult into StateReaderIOResult
// - FromReaderIOResult: Lift a ReaderIOResult into StateReaderIOResult
//
// Transformation:
// - Map: Transform the success value
// - Chain: Sequence dependent computations (monadic bind)
// - Flatten: Flatten nested StateReaderIOResult
//
// Combination:
// - Ap: Apply a function in a context to a value in a context
//
// Context Access:
// - Asks: Get a value derived from the context
// - Local: Run a computation with a modified context
//
// Kleisli Arrows:
// - FromResultK: Lift a Result-returning function to a Kleisli arrow
// - FromIOK: Lift an IO-returning function to a Kleisli arrow
// - FromIOResultK: Lift an IOResult-returning function to a Kleisli arrow
// - FromReaderIOResultK: Lift a ReaderIOResult-returning function to a Kleisli arrow
// - ChainResultK: Chain with a Result-returning function
// - ChainIOResultK: Chain with an IOResult-returning function
// - ChainReaderIOResultK: Chain with a ReaderIOResult-returning function
//
// Do Notation (Monadic Composition):
// - Do: Start a do-notation chain
// - Bind: Bind a value from a computation
// - BindTo: Bind a value to a simple constructor
// - Let: Compute a derived value
// - LetTo: Set a constant value
// - ApS: Apply in sequence (for applicative composition)
// - BindL/ApSL/LetL/LetToL: Lens-based variants for working with nested structures
//
// # Example Usage
//
// type AppState struct {
// RequestCount int
// LastError error
// }
//
// // A computation that manages state, depends on context, performs IO, and can fail
// func processRequest(data string) statereaderioresult.StateReaderIOResult[AppState, string] {
// return func(state AppState) readerioresult.ReaderIOResult[pair.Pair[AppState, string]] {
// return func(ctx context.Context) ioresult.IOResult[pair.Pair[AppState, string]] {
// return func() result.Result[pair.Pair[AppState, string]] {
// // Check context for cancellation
// if ctx.Err() != nil {
// return result.Error[pair.Pair[AppState, string]](ctx.Err())
// }
// // Update state
// newState := AppState{RequestCount: state.RequestCount + 1}
// // Perform IO operations
// return result.Of(pair.MakePair(newState, "processed: " + data))
// }
// }
// }
// }
//
// // Compose operations using do-notation
// result := function.Pipe2(
// statereaderioresult.Do[AppState](State{}),
// statereaderioresult.Bind(
// func(result string) func(State) State { return func(s State) State { return State{result: result} } },
// func(s State) statereaderioresult.StateReaderIOResult[AppState, string] {
// return processRequest(s.input)
// },
// ),
// statereaderioresult.Map[AppState](func(s State) string { return s.result }),
// )
//
// // Execute with initial state and context
// initialState := AppState{RequestCount: 0}
// ctx := context.Background()
// outcome := result(initialState)(ctx)() // Returns result.Result[pair.Pair[AppState, string]]
//
// # Context Integration
//
// This package is designed to work seamlessly with Go's context.Context:
//
// // Using context values
// getUserID := statereaderioresult.Asks[AppState, string](func(ctx context.Context) statereaderioresult.StateReaderIOResult[AppState, string] {
// userID, ok := ctx.Value("userID").(string)
// if !ok {
// return statereaderioresult.Left[AppState, string](errors.New("missing userID"))
// }
// return statereaderioresult.Of[AppState](userID)
// })
//
// // Using context cancellation
// withTimeout := statereaderioresult.Local[AppState, string](func(ctx context.Context) context.Context {
// ctx, _ = context.WithTimeout(ctx, 5*time.Second)
// return ctx
// })
//
// # Monad Laws
//
// StateReaderIOResult satisfies the monad laws:
// - Left Identity: Of(a) >>= f ≡ f(a)
// - Right Identity: m >>= Of ≡ m
// - Associativity: (m >>= f) >>= g ≡ m >>= (x => f(x) >>= g)
//
// These laws are verified in the testing subpackage.
package statereaderioresult


@@ -0,0 +1,41 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"context"
"github.com/IBM/fp-go/v2/eq"
"github.com/IBM/fp-go/v2/function"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
)
// Eq implements the equals predicate for values contained in the [StateReaderIOResult] monad
func Eq[S, A any](eqr eq.Eq[ReaderIOResult[Pair[S, A]]]) func(S) eq.Eq[StateReaderIOResult[S, A]] {
return func(s S) eq.Eq[StateReaderIOResult[S, A]] {
return eq.FromEquals(func(l, r StateReaderIOResult[S, A]) bool {
return eqr.Equals(l(s), r(s))
})
}
}
// FromStrictEquals constructs an [eq.Eq] from the canonical comparison function
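//
// A minimal usage sketch (both the state and the value type must be comparable;
// the comparison runs both computations with the given context and initial state):
//
//	eq := FromStrictEquals[int, string]()(context.Background())(42)
//	same := eq.Equals(Of[int]("a"), Of[int]("a")) // true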
func FromStrictEquals[S comparable, A comparable]() func(context.Context) func(S) eq.Eq[StateReaderIOResult[S, A]] {
return function.Flow2(
RIOR.FromStrictEquals[context.Context, Pair[S, A]](),
Eq[S, A],
)
}


@@ -0,0 +1,103 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"github.com/IBM/fp-go/v2/internal/applicative"
"github.com/IBM/fp-go/v2/internal/functor"
"github.com/IBM/fp-go/v2/internal/monad"
"github.com/IBM/fp-go/v2/internal/pointed"
)
type stateReaderIOResultPointed[
S, A any,
] struct{}
type stateReaderIOResultFunctor[
S, A, B any,
] struct{}
type stateReaderIOResultApplicative[
S, A, B any,
] struct{}
type stateReaderIOResultMonad[
S, A, B any,
] struct{}
func (o *stateReaderIOResultPointed[S, A]) Of(a A) StateReaderIOResult[S, A] {
return Of[S](a)
}
func (o *stateReaderIOResultMonad[S, A, B]) Of(a A) StateReaderIOResult[S, A] {
return Of[S](a)
}
func (o *stateReaderIOResultApplicative[S, A, B]) Of(a A) StateReaderIOResult[S, A] {
return Of[S](a)
}
func (o *stateReaderIOResultMonad[S, A, B]) Map(f func(A) B) Operator[S, A, B] {
return Map[S](f)
}
func (o *stateReaderIOResultApplicative[S, A, B]) Map(f func(A) B) Operator[S, A, B] {
return Map[S](f)
}
func (o *stateReaderIOResultFunctor[S, A, B]) Map(f func(A) B) Operator[S, A, B] {
return Map[S](f)
}
func (o *stateReaderIOResultMonad[S, A, B]) Chain(f Kleisli[S, A, B]) Operator[S, A, B] {
return Chain(f)
}
func (o *stateReaderIOResultMonad[S, A, B]) Ap(fa StateReaderIOResult[S, A]) Operator[S, func(A) B, B] {
return Ap[B](fa)
}
func (o *stateReaderIOResultApplicative[S, A, B]) Ap(fa StateReaderIOResult[S, A]) Operator[S, func(A) B, B] {
return Ap[B](fa)
}
// Pointed implements the [pointed.Pointed] operations for [StateReaderIOResult]
func Pointed[
S, A any,
]() pointed.Pointed[A, StateReaderIOResult[S, A]] {
return &stateReaderIOResultPointed[S, A]{}
}
// Functor implements the [functor.Functor] operations for [StateReaderIOResult]
func Functor[
S, A, B any,
]() functor.Functor[A, B, StateReaderIOResult[S, A], StateReaderIOResult[S, B]] {
return &stateReaderIOResultFunctor[S, A, B]{}
}
// Applicative implements the [applicative.Applicative] operations for [StateReaderIOResult]
func Applicative[
S, A, B any,
]() applicative.Applicative[A, B, StateReaderIOResult[S, A], StateReaderIOResult[S, B], StateReaderIOResult[S, func(A) B]] {
return &stateReaderIOResultApplicative[S, A, B]{}
}
// Monad implements the [monad.Monad] operations for [StateReaderIOResult]
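//
// A minimal usage sketch (mirrors the behaviour verified in the tests):
//
//	m := Monad[AppState, int, string]()
//	srior := m.Chain(func(x int) StateReaderIOResult[AppState, string] {
//		return Of[AppState](fmt.Sprintf("%d", x))
//	})(m.Of(42))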
func Monad[
S, A, B any,
]() monad.Monad[A, B, StateReaderIOResult[S, A], StateReaderIOResult[S, B], StateReaderIOResult[S, func(A) B]] {
return &stateReaderIOResultMonad[S, A, B]{}
}


@@ -0,0 +1,101 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import "github.com/IBM/fp-go/v2/statereaderioeither"
// WithResource constructs a function that creates a resource with state management, operates on it,
// and then releases the resource. This ensures proper resource cleanup even in the presence of errors,
// following the Resource Acquisition Is Initialization (RAII) pattern.
//
// The state is threaded through all operations: resource creation, usage, and release.
//
// The resource lifecycle with state management is:
// 1. onCreate: Acquires the resource (may modify state)
// 2. use: Operates on the resource with current state (provided as argument to the returned function)
// 3. onRelease: Releases the resource with current state (called regardless of success or failure)
//
// Type parameters:
// - A: The type of the result produced by using the resource
// - S: The state type that is threaded through all operations
// - RES: The resource type
// - ANY: The type returned by the release function (typically ignored)
//
// Parameters:
// - onCreate: A stateful computation that acquires the resource
// - onRelease: A stateful function that releases the resource, called with the resource and current state,
// executed regardless of errors
//
// Returns:
//
// A function that takes a resource-using function and returns a StateReaderIOResult that manages
// the resource lifecycle with state
//
// Example:
//
// type AppState struct {
// openFiles int
// }
//
// // Resource creation that updates state
// openFile := func(filename string) StateReaderIOResult[AppState, *File] {
// return func(state AppState) ReaderIOResult[Pair[AppState, *File]] {
// return func(ctx context.Context) IOResult[Pair[AppState, *File]] {
// return func() Result[Pair[AppState, *File]] {
// file, err := os.Open(filename)
// if err != nil {
// return result.Error[Pair[AppState, *File]](err)
// }
// newState := AppState{openFiles: state.openFiles + 1}
// return result.Of(pair.MakePair(newState, file))
// }
// }
// }
// }
//
// // Resource release that updates state
// closeFile := func(f *File) StateReaderIOResult[AppState, int] {
// return func(state AppState) ReaderIOResult[Pair[AppState, int]] {
// return func(ctx context.Context) IOResult[Pair[AppState, int]] {
// return func() Result[Pair[AppState, int]] {
// f.Close()
// newState := AppState{openFiles: state.openFiles - 1}
// return result.Of(pair.MakePair(newState, 0))
// }
// }
// }
// }
//
// // Use the resource with automatic cleanup
// withFile := WithResource(
// openFile("data.txt"),
// closeFile,
// )
//
// result := withFile(func(f *File) StateReaderIOResult[AppState, string] {
// return readContent(f) // File will be closed automatically
// })
//
// // Execute the computation
// initialState := AppState{openFiles: 0}
// ctx := context.Background()
// outcome := result(initialState)(ctx)()
func WithResource[A, S, RES, ANY any](
onCreate StateReaderIOResult[S, RES],
onRelease Kleisli[S, RES, ANY],
) Kleisli[S, Kleisli[S, RES, A], A] {
return statereaderioeither.WithResource[A](onCreate, onRelease)
}


@@ -0,0 +1,415 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"context"
"errors"
"testing"
P "github.com/IBM/fp-go/v2/pair"
RES "github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
// resourceState tracks the lifecycle of resources for testing
type resourceState struct {
resourcesCreated int
resourcesReleased int
lastError error
}
// mockResource represents a test resource
type mockResource struct {
id int
isValid bool
}
// TestWithResourceSuccess tests successful resource creation, usage, and release
func TestWithResourceSuccess(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx := context.Background()
// Create a resource
onCreate := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated + 1,
resourcesReleased: s.resourcesReleased,
}
res := mockResource{id: newState.resourcesCreated, isValid: true}
return RES.Of(P.MakePair(newState, res))
}
}
}
// Release a resource
onRelease := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated,
resourcesReleased: s.resourcesReleased + 1,
}
return RES.Of(P.MakePair(newState, 0))
}
}
}
}
// Use the resource
useResource := func(res mockResource) StateReaderIOResult[resourceState, string] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, string]] {
return func(ctx context.Context) IOResult[Pair[resourceState, string]] {
return func() Result[Pair[resourceState, string]] {
result := "used resource " + string(rune(res.id+'0'))
return RES.Of(P.MakePair(s, result))
}
}
}
}
withResource := WithResource[string](onCreate, onRelease)
result := withResource(useResource)
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsRight(outcome))
RES.Map(func(p Pair[resourceState, string]) Pair[resourceState, string] {
state := P.Head(p)
value := P.Tail(p)
// Verify state updates
// Note: Final state comes from the use function, not the release function
// onCreate: 0->1, use: sees 1 (doesn't modify), release: sees 1 and increments released
// The final state is from use function which saw state=1 with resourcesReleased=0
assert.Equal(t, 1, state.resourcesCreated, "Resource should be created once")
assert.Equal(t, 0, state.resourcesReleased, "Final state is from use function, before release")
// Verify result
assert.Equal(t, "used resource 1", value)
return p
})(outcome)
}
// TestWithResourceErrorInCreate tests error handling when resource creation fails
func TestWithResourceErrorInCreate(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx := context.Background()
createError := errors.New("failed to create resource")
// onCreate that fails
onCreate := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
return RES.Left[Pair[resourceState, mockResource]](createError)
}
}
}
// Release should not be called if onCreate fails
onRelease := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
t.Error("onRelease should not be called when onCreate fails")
return RES.Of(P.MakePair(s, 0))
}
}
}
}
useResource := func(res mockResource) StateReaderIOResult[resourceState, string] {
return Of[resourceState]("should not reach here")
}
withResource := WithResource[string](onCreate, onRelease)
result := withResource(useResource)
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsLeft(outcome))
RES.Fold(
func(err error) bool {
assert.Equal(t, createError, err)
return true
},
func(p Pair[resourceState, string]) bool {
t.Error("Expected error but got success")
return false
},
)(outcome)
}
// TestWithResourceErrorInUse tests that resources are released even when usage fails
func TestWithResourceErrorInUse(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx := context.Background()
useError := errors.New("failed to use resource")
// Create a resource
onCreate := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated + 1,
resourcesReleased: s.resourcesReleased,
}
res := mockResource{id: 1, isValid: true}
return RES.Of(P.MakePair(newState, res))
}
}
}
releaseWasCalled := false
// Release should still be called even if use fails
onRelease := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
releaseWasCalled = true
newState := resourceState{
resourcesCreated: s.resourcesCreated,
resourcesReleased: s.resourcesReleased + 1,
}
return RES.Of(P.MakePair(newState, 0))
}
}
}
}
// Use that fails
useResource := func(res mockResource) StateReaderIOResult[resourceState, string] {
return Left[resourceState, string](useError)
}
withResource := WithResource[string](onCreate, onRelease)
result := withResource(useResource)
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsLeft(outcome))
assert.True(t, releaseWasCalled, "onRelease should be called even when use fails")
RES.Fold(
func(err error) bool {
assert.Equal(t, useError, err)
return true
},
func(p Pair[resourceState, string]) bool {
t.Error("Expected error but got success")
return false
},
)(outcome)
}
// TestWithResourceStateThreading tests that state is properly threaded through all operations
func TestWithResourceStateThreading(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx := context.Background()
// Create increments counter
onCreate := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated + 1,
resourcesReleased: s.resourcesReleased,
}
res := mockResource{id: newState.resourcesCreated, isValid: true}
return RES.Of(P.MakePair(newState, res))
}
}
}
// Use observes the state after creation
useResource := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
// Verify state was updated by onCreate
assert.Equal(t, 1, s.resourcesCreated)
assert.Equal(t, 0, s.resourcesReleased)
return RES.Of(P.MakePair(s, s.resourcesCreated))
}
}
}
}
// Release increments released counter
onRelease := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
// Verify state was updated by onCreate and use
assert.Equal(t, 1, s.resourcesCreated)
assert.Equal(t, 0, s.resourcesReleased)
newState := resourceState{
resourcesCreated: s.resourcesCreated,
resourcesReleased: s.resourcesReleased + 1,
}
return RES.Of(P.MakePair(newState, 0))
}
}
}
}
withResource := WithResource[int](onCreate, onRelease)
result := withResource(useResource)
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsRight(outcome))
RES.Map(func(p Pair[resourceState, int]) Pair[resourceState, int] {
finalState := P.Head(p)
value := P.Tail(p)
// Verify final state
// Note: Final state is from the use function, which preserves the state it received
// onCreate: 0->1, use: sees 1, release: sees 1 and increments released to 1
// But final state is from use function where resourcesReleased=0
assert.Equal(t, 1, finalState.resourcesCreated)
assert.Equal(t, 0, finalState.resourcesReleased, "Final state is from use function, before release")
assert.Equal(t, 1, value)
return p
})(outcome)
}
// TestWithResourceMultipleResources tests using WithResource multiple times (nesting)
func TestWithResourceMultipleResources(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx := context.Background()
createResource := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated + 1,
resourcesReleased: s.resourcesReleased,
}
res := mockResource{id: newState.resourcesCreated, isValid: true}
return RES.Of(P.MakePair(newState, res))
}
}
}
releaseResource := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated,
resourcesReleased: s.resourcesReleased + 1,
}
return RES.Of(P.MakePair(newState, 0))
}
}
}
}
// Create two nested resources
withResource1 := WithResource[int](createResource, releaseResource)
withResource2 := WithResource[int](createResource, releaseResource)
result := withResource1(func(res1 mockResource) StateReaderIOResult[resourceState, int] {
return withResource2(func(res2 mockResource) StateReaderIOResult[resourceState, int] {
// Both resources should be available
return Of[resourceState](res1.id + res2.id)
})
})
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsRight(outcome))
RES.Map(func(p Pair[resourceState, int]) Pair[resourceState, int] {
finalState := P.Head(p)
value := P.Tail(p)
// Both resources created, but final state is from innermost use function
// onCreate1: 0->1, onCreate2: 1->2, use (Of): sees 2
// Release functions execute but their state changes aren't in the final result
assert.Equal(t, 2, finalState.resourcesCreated)
assert.Equal(t, 0, finalState.resourcesReleased, "Final state is from use function, before releases")
// res1.id = 1, res2.id = 2, sum = 3
assert.Equal(t, 3, value)
return p
})(outcome)
}
// TestWithResourceContextCancellation tests behavior with context cancellation
func TestWithResourceContextCancellation(t *testing.T) {
initialState := resourceState{resourcesCreated: 0, resourcesReleased: 0}
ctx, cancel := context.WithCancel(context.Background())
cancel() // Cancel immediately
cancelError := errors.New("context cancelled")
// Create should respect context cancellation
onCreate := func(s resourceState) ReaderIOResult[Pair[resourceState, mockResource]] {
return func(ctx context.Context) IOResult[Pair[resourceState, mockResource]] {
return func() Result[Pair[resourceState, mockResource]] {
if ctx.Err() != nil {
return RES.Left[Pair[resourceState, mockResource]](cancelError)
}
newState := resourceState{
resourcesCreated: s.resourcesCreated + 1,
resourcesReleased: s.resourcesReleased,
}
res := mockResource{id: 1, isValid: true}
return RES.Of(P.MakePair(newState, res))
}
}
}
onRelease := func(res mockResource) StateReaderIOResult[resourceState, int] {
return func(s resourceState) ReaderIOResult[Pair[resourceState, int]] {
return func(ctx context.Context) IOResult[Pair[resourceState, int]] {
return func() Result[Pair[resourceState, int]] {
newState := resourceState{
resourcesCreated: s.resourcesCreated,
resourcesReleased: s.resourcesReleased + 1,
}
return RES.Of(P.MakePair(newState, 0))
}
}
}
}
useResource := func(res mockResource) StateReaderIOResult[resourceState, string] {
return Of[resourceState]("should not reach here")
}
withResource := WithResource[string](onCreate, onRelease)
result := withResource(useResource)
outcome := result(initialState)(ctx)()
assert.True(t, RES.IsLeft(outcome))
RES.Fold(
func(err error) bool {
assert.Equal(t, cancelError, err)
return true
},
func(p Pair[resourceState, string]) bool {
t.Error("Expected error but got success")
return false
},
)(outcome)
}


@@ -0,0 +1,309 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"context"
RIORES "github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/statet"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/result"
)
// Left creates a StateReaderIOResult that represents a failed computation with the given error.
// The error value is immediately available and does not depend on state or context.
//
// Example:
//
// result := statereaderioresult.Left[AppState, string](errors.New("validation failed"))
// // Returns a failed computation that ignores state and context
func Left[S, A any](e error) StateReaderIOResult[S, A] {
return function.Constant1[S](RIORES.Left[Pair[S, A]](e))
}
// Right creates a StateReaderIOResult that represents a successful computation with the given value.
// The value is wrapped and the state is passed through unchanged.
//
// Example:
//
// result := statereaderioresult.Right[AppState](42)
// // Returns a successful computation containing 42
func Right[S, A any](a A) StateReaderIOResult[S, A] {
return statet.Of[StateReaderIOResult[S, A]](RIORES.Of[Pair[S, A]], a)
}
// Of creates a StateReaderIOResult that represents a successful computation with the given value.
// This is the monadic return/pure operation for StateReaderIOResult.
// Equivalent to [Right].
//
// Example:
//
// result := statereaderioresult.Of[AppState](42)
// // Returns a successful computation containing 42
func Of[S, A any](a A) StateReaderIOResult[S, A] {
return Right[S](a)
}
// MonadMap transforms the success value of a StateReaderIOResult using the provided function.
// If the computation fails, the error is propagated unchanged.
// The state is threaded through the computation.
// This is the functor map operation.
//
// Example:
//
// result := statereaderioresult.MonadMap(
// statereaderioresult.Of[AppState](21),
// N.Mul(2),
// ) // Result contains 42
func MonadMap[S, A, B any](fa StateReaderIOResult[S, A], f func(A) B) StateReaderIOResult[S, B] {
return statet.MonadMap[StateReaderIOResult[S, A], StateReaderIOResult[S, B]](
RIORES.MonadMap[Pair[S, A], Pair[S, B]],
fa,
f,
)
}
// Map is the curried version of [MonadMap].
// Returns a function that transforms a StateReaderIOResult.
//
// Example:
//
// double := statereaderioresult.Map[AppState](N.Mul(2))
// result := function.Pipe1(statereaderioresult.Of[AppState](21), double)
func Map[S, A, B any](f func(A) B) Operator[S, A, B] {
return statet.Map[StateReaderIOResult[S, A], StateReaderIOResult[S, B]](
RIORES.Map[Pair[S, A], Pair[S, B]],
f,
)
}
// MonadChain sequences two computations, passing the result of the first to a function
// that produces the second computation. This is the monadic bind operation.
// The state is threaded through both computations.
//
// Example:
//
// result := statereaderioresult.MonadChain(
// statereaderioresult.Of[AppState](5),
// func(x int) statereaderioresult.StateReaderIOResult[AppState, string] {
// return statereaderioresult.Of[AppState](fmt.Sprintf("value: %d", x))
// },
// )
func MonadChain[S, A, B any](fa StateReaderIOResult[S, A], f Kleisli[S, A, B]) StateReaderIOResult[S, B] {
return statet.MonadChain(
RIORES.MonadChain[Pair[S, A], Pair[S, B]],
fa,
f,
)
}
// Chain is the curried version of [MonadChain].
// Returns a function that sequences computations.
//
// Example:
//
// stringify := statereaderioresult.Chain[AppState](func(x int) statereaderioresult.StateReaderIOResult[AppState, string] {
// return statereaderioresult.Of[AppState](fmt.Sprintf("%d", x))
// })
// result := function.Pipe1(statereaderioresult.Of[AppState](42), stringify)
func Chain[S, A, B any](f Kleisli[S, A, B]) Operator[S, A, B] {
return statet.Chain[StateReaderIOResult[S, A]](
RIORES.Chain[Pair[S, A], Pair[S, B]],
f,
)
}
// MonadAp applies a function wrapped in a StateReaderIOResult to a value wrapped in a StateReaderIOResult.
// If either the function or the value fails, the error is propagated.
// The state is threaded through both computations sequentially.
// This is the applicative apply operation.
//
// Example:
//
// fab := statereaderioresult.Of[AppState](N.Mul(2))
// fa := statereaderioresult.Of[AppState](21)
// result := statereaderioresult.MonadAp(fab, fa) // Result contains 42
func MonadAp[B, S, A any](fab StateReaderIOResult[S, func(A) B], fa StateReaderIOResult[S, A]) StateReaderIOResult[S, B] {
return statet.MonadAp[StateReaderIOResult[S, A], StateReaderIOResult[S, B]](
RIORES.MonadMap[Pair[S, A], Pair[S, B]],
RIORES.MonadChain[Pair[S, func(A) B], Pair[S, B]],
fab,
fa,
)
}
// Ap is the curried version of [MonadAp].
// Returns a function that applies a wrapped function to the given wrapped value.
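//
// Example (mirrors [MonadAp]):
//
//	fa := statereaderioresult.Of[AppState](21)
//	result := function.Pipe1(
//		statereaderioresult.Of[AppState](N.Mul(2)),
//		statereaderioresult.Ap[int](fa),
//	) // Result contains 42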
func Ap[B, S, A any](fa StateReaderIOResult[S, A]) Operator[S, func(A) B, B] {
return statet.Ap[StateReaderIOResult[S, A], StateReaderIOResult[S, B], StateReaderIOResult[S, func(A) B]](
RIORES.Map[Pair[S, A], Pair[S, B]],
RIORES.Chain[Pair[S, func(A) B], Pair[S, B]],
fa,
)
}
// FromReaderIOResult lifts a ReaderIOResult into a StateReaderIOResult.
// The state is passed through unchanged.
//
// Example:
//
// riores := readerioresult.Of(42)
// result := statereaderioresult.FromReaderIOResult[AppState](riores)
func FromReaderIOResult[S, A any](fa ReaderIOResult[A]) StateReaderIOResult[S, A] {
return statet.FromF[StateReaderIOResult[S, A]](
RIORES.MonadMap[A],
fa,
)
}
// FromIOResult lifts an IOResult into a StateReaderIOResult.
// The state is passed through unchanged and the context is ignored.
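//
// Example:
//
//	result := statereaderioresult.FromIOResult[AppState](ioresult.Of(55))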
func FromIOResult[S, A any](fa IOResult[A]) StateReaderIOResult[S, A] {
return FromReaderIOResult[S](RIORES.FromIOResult(fa))
}
// FromState lifts a State computation into a StateReaderIOResult.
// The resulting computation never fails; the state transition is wrapped in a successful Result.
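//
// Example:
//
//	increment := func(s AppState) pair.Pair[AppState, int] {
//		next := AppState{RequestCount: s.RequestCount + 1}
//		return pair.MakePair(next, next.RequestCount)
//	}
//	result := statereaderioresult.FromState(increment)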
func FromState[S, A any](sa State[S, A]) StateReaderIOResult[S, A] {
return statet.FromState[StateReaderIOResult[S, A]](RIORES.Of[Pair[S, A]], sa)
}
// FromIO lifts an IO computation into a StateReaderIOResult.
// The state is passed through unchanged and the context is ignored.
func FromIO[S, A any](fa IO[A]) StateReaderIOResult[S, A] {
return FromReaderIOResult[S](RIORES.FromIO(fa))
}
// FromResult lifts a Result into a StateReaderIOResult.
// The state is passed through unchanged and the context is ignored.
//
// Example:
//
// result := statereaderioresult.FromResult[AppState](result.Of(42))
func FromResult[S, A any](ma Result[A]) StateReaderIOResult[S, A] {
return result.Fold(Left[S, A], Right[S, A])(ma)
}
// Combinators
// Local runs a computation with a modified context.
// The function f transforms the context before passing it to the computation.
//
// Example:
//
// // Modify context before running computation
// withTimeout := statereaderioresult.Local[AppState, string](
// func(ctx context.Context) context.Context {
// ctx, _ = context.WithTimeout(ctx, 60*time.Second)
// return ctx
// }
// )
// result := withTimeout(computation)
func Local[S, A any](f func(context.Context) context.Context) func(StateReaderIOResult[S, A]) StateReaderIOResult[S, A] {
return func(ma StateReaderIOResult[S, A]) StateReaderIOResult[S, A] {
return function.Flow2(ma, RIOR.Local[Pair[S, A]](f))
}
}
// Asks creates a computation that derives a value from the context.
// The function receives the context and returns a StateReaderIOResult.
//
// Example:
//
// getValue := statereaderioresult.Asks[AppState, string](
// func(ctx context.Context) statereaderioresult.StateReaderIOResult[AppState, string] {
// return statereaderioresult.Of[AppState](ctx.Value("key").(string))
// },
// )
func Asks[S, A any](f func(context.Context) StateReaderIOResult[S, A]) StateReaderIOResult[S, A] {
return func(s S) ReaderIOResult[Pair[S, A]] {
return func(ctx context.Context) IOResult[Pair[S, A]] {
return f(ctx)(s)(ctx)
}
}
}
// FromResultK lifts a Result-returning function into a Kleisli arrow for StateReaderIOResult.
//
// Example:
//
// validate := func(x int) result.Result[int] {
// if x > 0 { return result.Of(x) }
// return result.Error[int](errors.New("negative"))
// }
// kleisli := statereaderioresult.FromResultK[AppState](validate)
func FromResultK[S, A, B any](f func(A) Result[B]) Kleisli[S, A, B] {
return function.Flow2(
f,
FromResult[S, B],
)
}
// FromIOK lifts an IO-returning function into a Kleisli arrow for StateReaderIOResult.
func FromIOK[S, A, B any](f func(A) IO[B]) Kleisli[S, A, B] {
return function.Flow2(
f,
FromIO[S, B],
)
}
// FromIOResultK lifts an IOResult-returning function into a Kleisli arrow for StateReaderIOResult.
func FromIOResultK[S, A, B any](f func(A) IOResult[B]) Kleisli[S, A, B] {
return function.Flow2(
f,
FromIOResult[S, B],
)
}
// FromReaderIOResultK lifts a ReaderIOResult-returning function into a Kleisli arrow for StateReaderIOResult.
func FromReaderIOResultK[S, A, B any](f func(A) ReaderIOResult[B]) Kleisli[S, A, B] {
return function.Flow2(
f,
FromReaderIOResult[S, B],
)
}
// MonadChainReaderIOResultK chains a StateReaderIOResult with a ReaderIOResult-returning function.
func MonadChainReaderIOResultK[S, A, B any](ma StateReaderIOResult[S, A], f func(A) ReaderIOResult[B]) StateReaderIOResult[S, B] {
return MonadChain(ma, FromReaderIOResultK[S](f))
}
// ChainReaderIOResultK is the curried version of [MonadChainReaderIOResultK].
func ChainReaderIOResultK[S, A, B any](f func(A) ReaderIOResult[B]) Operator[S, A, B] {
return Chain(FromReaderIOResultK[S](f))
}
// MonadChainIOResultK chains a StateReaderIOResult with an IOResult-returning function.
func MonadChainIOResultK[S, A, B any](ma StateReaderIOResult[S, A], f func(A) IOResult[B]) StateReaderIOResult[S, B] {
return MonadChain(ma, FromIOResultK[S](f))
}
// ChainIOResultK is the curried version of [MonadChainIOResultK].
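//
// Example:
//
//	result := function.Pipe1(
//		statereaderioresult.Of[AppState](100),
//		statereaderioresult.ChainIOResultK[AppState](func(x int) ioresult.IOResult[string] {
//			return ioresult.Of(fmt.Sprintf("result: %d", x))
//		}),
//	)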
func ChainIOResultK[S, A, B any](f func(A) IOResult[B]) Operator[S, A, B] {
return Chain(FromIOResultK[S](f))
}
// MonadChainResultK chains a StateReaderIOResult with a Result-returning function.
func MonadChainResultK[S, A, B any](ma StateReaderIOResult[S, A], f func(A) Result[B]) StateReaderIOResult[S, B] {
return MonadChain(ma, FromResultK[S](f))
}
// ChainResultK is the curried version of [MonadChainResultK].
func ChainResultK[S, A, B any](f func(A) Result[B]) Operator[S, A, B] {
return Chain(FromResultK[S](f))
}


@@ -0,0 +1,567 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statereaderioresult
import (
"context"
"errors"
"fmt"
"testing"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/io"
IOR "github.com/IBM/fp-go/v2/ioresult"
N "github.com/IBM/fp-go/v2/number"
P "github.com/IBM/fp-go/v2/pair"
RES "github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
type testState struct {
counter int
}
func TestOf(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
result := Of[testState](42)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Fold(
func(err error) bool {
t.Fatalf("Expected Success but got Error: %v", err)
return false
},
func(p P.Pair[testState, int]) bool {
assert.Equal(t, 42, P.Tail(p))
assert.Equal(t, 0, P.Head(p).counter) // State unchanged
return true
},
)(res)
}
func TestRight(t *testing.T) {
state := testState{counter: 5}
ctx := context.Background()
result := Right[testState](100)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 100, P.Tail(p))
assert.Equal(t, 5, P.Head(p).counter)
return p
})(res)
}
func TestLeft(t *testing.T) {
state := testState{counter: 10}
ctx := context.Background()
testErr := errors.New("test error")
result := Left[testState, int](testErr)
res := result(state)(ctx)()
assert.True(t, RES.IsLeft(res))
}
func TestMonadMap(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
result := MonadMap(
Of[testState](21),
N.Mul(2),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 42, P.Tail(p))
return p
})(res)
}
func TestMap(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
result := F.Pipe1(
Of[testState](21),
Map[testState](N.Mul(2)),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 42, P.Tail(p))
return p
})(res)
}
func TestMonadChain(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
result := MonadChain(
Of[testState](5),
func(x int) StateReaderIOResult[testState, string] {
return Of[testState](fmt.Sprintf("value: %d", x))
},
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
assert.Equal(t, "value: 5", P.Tail(p))
return p
})(res)
}
func TestChain(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
result := F.Pipe1(
Of[testState](5),
Chain(func(x int) StateReaderIOResult[testState, string] {
return Of[testState](fmt.Sprintf("value: %d", x))
}),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
assert.Equal(t, "value: 5", P.Tail(p))
return p
})(res)
}
func TestMonadAp(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
fab := Of[testState](N.Mul(2))
fa := Of[testState](21)
result := MonadAp(fab, fa)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 42, P.Tail(p))
return p
})(res)
}
func TestAp(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
fa := Of[testState](21)
result := F.Pipe1(
Of[testState](N.Mul(2)),
Ap[int](fa),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 42, P.Tail(p))
return p
})(res)
}
func TestFromIOResult(t *testing.T) {
state := testState{counter: 3}
ctx := context.Background()
ior := IOR.Of(55)
result := FromIOResult[testState](ior)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 55, P.Tail(p))
assert.Equal(t, 3, P.Head(p).counter)
return p
})(res)
}
func TestFromState(t *testing.T) {
initialState := testState{counter: 10}
ctx := context.Background()
// State computation that increments counter and returns it
stateComp := func(s testState) P.Pair[testState, int] {
newState := testState{counter: s.counter + 1}
return P.MakePair(newState, newState.counter)
}
result := FromState(stateComp)
res := result(initialState)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 11, P.Tail(p)) // Incremented value
assert.Equal(t, 11, P.Head(p).counter) // State updated
return p
})(res)
}
func TestFromIO(t *testing.T) {
state := testState{counter: 8}
ctx := context.Background()
ioVal := func() int { return 99 }
result := FromIO[testState](ioVal)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 99, P.Tail(p))
assert.Equal(t, 8, P.Head(p).counter)
return p
})(res)
}
func TestFromResult(t *testing.T) {
state := testState{counter: 12}
ctx := context.Background()
// Test Success case
resultSuccess := FromResult[testState](RES.Of(42))
resSuccess := resultSuccess(state)(ctx)()
assert.True(t, RES.IsRight(resSuccess))
// Test Error case
resultError := FromResult[testState](RES.Left[int](errors.New("error")))
resError := resultError(state)(ctx)()
assert.True(t, RES.IsLeft(resError))
}
func TestLocal(t *testing.T) {
state := testState{counter: 0}
ctx := context.WithValue(context.Background(), "key", "value1")
// Create a computation that uses the context
comp := Asks(func(c context.Context) StateReaderIOResult[testState, string] {
val := c.Value("key").(string)
return Of[testState](val)
})
// Modify context before running computation
result := Local[testState, string](
func(c context.Context) context.Context {
return context.WithValue(c, "key", "value2")
},
)(comp)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
assert.Equal(t, "value2", P.Tail(p))
return p
})(res)
}
func TestAsks(t *testing.T) {
state := testState{counter: 0}
ctx := context.WithValue(context.Background(), "multiplier", 7)
result := Asks(func(c context.Context) StateReaderIOResult[testState, int] {
mult := c.Value("multiplier").(int)
return Of[testState](mult * 5)
})
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 35, P.Tail(p))
return p
})(res)
}
func TestFromResultK(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
validate := func(x int) RES.Result[int] {
if x > 0 {
return RES.Of(x * 2)
}
return RES.Left[int](errors.New("negative"))
}
kleisli := FromResultK[testState](validate)
// Test with valid input
resultValid := kleisli(5)
resValid := resultValid(state)(ctx)()
assert.True(t, RES.IsRight(resValid))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 10, P.Tail(p))
return p
})(resValid)
// Test with invalid input
resultInvalid := kleisli(-5)
resInvalid := resultInvalid(state)(ctx)()
assert.True(t, RES.IsLeft(resInvalid))
}
func TestFromIOK(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
ioFunc := func(x int) io.IO[int] {
return func() int { return x * 3 }
}
kleisli := FromIOK[testState](ioFunc)
result := kleisli(7)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 21, P.Tail(p))
return p
})(res)
}
func TestFromIOResultK(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
iorFunc := func(x int) IOR.IOResult[int] {
if x > 0 {
return IOR.Of(x * 4)
}
return IOR.Left[int](errors.New("invalid"))
}
kleisli := FromIOResultK[testState](iorFunc)
result := kleisli(3)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
assert.Equal(t, 12, P.Tail(p))
return p
})(res)
}
func TestChainResultK(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
validate := func(x int) RES.Result[string] {
if x > 0 {
return RES.Of(fmt.Sprintf("valid: %d", x))
}
return RES.Left[string](errors.New("invalid"))
}
result := F.Pipe1(
Of[testState](42),
ChainResultK[testState](validate),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
assert.Equal(t, "valid: 42", P.Tail(p))
return p
})(res)
}
func TestChainIOResultK(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
iorFunc := func(x int) IOR.IOResult[string] {
return IOR.Of(fmt.Sprintf("result: %d", x))
}
result := F.Pipe1(
Of[testState](100),
ChainIOResultK[testState](iorFunc),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
assert.Equal(t, "result: 100", P.Tail(p))
return p
})(res)
}
func TestDo(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
type Result struct {
value int
}
result := Do[testState](Result{})
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, Result]) P.Pair[testState, Result] {
assert.Equal(t, 0, P.Tail(p).value)
return p
})(res)
}
func TestBindTo(t *testing.T) {
state := testState{counter: 0}
ctx := context.Background()
type Result struct {
value int
}
result := F.Pipe1(
Of[testState](42),
BindTo[testState](func(v int) Result {
return Result{value: v}
}),
)
res := result(state)(ctx)()
assert.True(t, RES.IsRight(res))
RES.Map(func(p P.Pair[testState, Result]) P.Pair[testState, Result] {
assert.Equal(t, 42, P.Tail(p).value)
return p
})(res)
}
func TestStatefulComputation(t *testing.T) {
	initialState := testState{counter: 0}
	ctx := context.Background()
	// Create a computation that modifies state
	incrementAndGet := func(s testState) P.Pair[testState, int] {
		newState := testState{counter: s.counter + 1}
		return P.MakePair(newState, newState.counter)
	}
	// Chain multiple stateful operations
	result := F.Pipe2(
		FromState(incrementAndGet),
		Chain(func(v1 int) StateReaderIOResult[testState, int] {
			return FromState(incrementAndGet)
		}),
		Chain(func(v2 int) StateReaderIOResult[testState, int] {
			return FromState(incrementAndGet)
		}),
	)
	res := result(initialState)(ctx)()
	assert.True(t, RES.IsRight(res))
	RES.Map(func(p P.Pair[testState, int]) P.Pair[testState, int] {
		assert.Equal(t, 3, P.Tail(p))         // Last incremented value
		assert.Equal(t, 3, P.Head(p).counter) // State updated three times
		return p
	})(res)
}

func TestErrorPropagation(t *testing.T) {
	state := testState{counter: 0}
	ctx := context.Background()
	testErr := errors.New("test error")
	// Chain operations where the second one fails
	result := F.Pipe1(
		Of[testState](42),
		Chain(func(x int) StateReaderIOResult[testState, int] {
			return Left[testState, int](testErr)
		}),
	)
	res := result(state)(ctx)()
	assert.True(t, RES.IsLeft(res))
}

func TestPointed(t *testing.T) {
	p := Pointed[testState, int]()
	assert.NotNil(t, p)
	result := p.Of(42)
	state := testState{counter: 0}
	ctx := context.Background()
	res := result(state)(ctx)()
	assert.True(t, RES.IsRight(res))
}

func TestFunctor(t *testing.T) {
	f := Functor[testState, int, string]()
	assert.NotNil(t, f)
	mapper := f.Map(func(x int) string { return fmt.Sprintf("%d", x) })
	result := mapper(Of[testState](42))
	state := testState{counter: 0}
	ctx := context.Background()
	res := result(state)(ctx)()
	assert.True(t, RES.IsRight(res))
	RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
		assert.Equal(t, "42", P.Tail(p))
		return p
	})(res)
}

func TestApplicative(t *testing.T) {
	a := Applicative[testState, int, string]()
	assert.NotNil(t, a)
	fab := Of[testState](func(x int) string { return fmt.Sprintf("%d", x) })
	fa := Of[testState](42)
	result := a.Ap(fa)(fab)
	state := testState{counter: 0}
	ctx := context.Background()
	res := result(state)(ctx)()
	assert.True(t, RES.IsRight(res))
	RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
		assert.Equal(t, "42", P.Tail(p))
		return p
	})(res)
}

func TestMonad(t *testing.T) {
	m := Monad[testState, int, string]()
	assert.NotNil(t, m)
	fa := m.Of(42)
	result := m.Chain(func(x int) StateReaderIOResult[testState, string] {
		return Of[testState](fmt.Sprintf("%d", x))
	})(fa)
	state := testState{counter: 0}
	ctx := context.Background()
	res := result(state)(ctx)()
	assert.True(t, RES.IsRight(res))
	RES.Map(func(p P.Pair[testState, string]) P.Pair[testState, string] {
		assert.Equal(t, "42", P.Tail(p))
		return p
	})(res)
}


@@ -0,0 +1,87 @@
// Copyright (c) 2024 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package testing

import (
	"context"
	"testing"

	RIORES "github.com/IBM/fp-go/v2/context/readerioresult"
	ST "github.com/IBM/fp-go/v2/context/statereaderioresult"
	EQ "github.com/IBM/fp-go/v2/eq"
	L "github.com/IBM/fp-go/v2/internal/monad/testing"
	P "github.com/IBM/fp-go/v2/pair"
	RES "github.com/IBM/fp-go/v2/result"
)

// AssertLaws asserts the monad laws for the StateReaderIOResult monad.
// It returns a predicate over a single value of type A, so it can be driven
// by a property-based test (see the usage sketch after this function).
func AssertLaws[S, A, B, C any](t *testing.T,
	eqs EQ.Eq[S],
	eqa EQ.Eq[A],
	eqb EQ.Eq[B],
	eqc EQ.Eq[C],
	ab func(A) B,
	bc func(B) C,
	s S,
	ctx context.Context,
) func(a A) bool {
	// equality on the underlying ReaderIOResult values, comparing the
	// resulting (state, value) pairs for the given context
	eqra := RIORES.Eq(RES.Eq(P.Eq(eqs, eqa)))(ctx)
	eqrb := RIORES.Eq(RES.Eq(P.Eq(eqs, eqb)))(ctx)
	eqrc := RIORES.Eq(RES.Eq(P.Eq(eqs, eqc)))(ctx)

	// Pointed, Functor, Applicative and Monad instances required by the
	// generic monad law assertions
	fofc := ST.Pointed[S, C]()
	fofaa := ST.Pointed[S, func(A) A]()
	fofbc := ST.Pointed[S, func(B) C]()
	fofabb := ST.Pointed[S, func(func(A) B) B]()

	fmap := ST.Functor[S, func(B) C, func(func(A) B) func(A) C]()

	fapabb := ST.Applicative[S, func(A) B, B]()
	fapabac := ST.Applicative[S, func(A) B, func(A) C]()

	maa := ST.Monad[S, A, A]()
	mab := ST.Monad[S, A, B]()
	mac := ST.Monad[S, A, C]()
	mbc := ST.Monad[S, B, C]()

	return L.MonadAssertLaws(t,
		ST.Eq(eqra)(s),
		ST.Eq(eqrb)(s),
		ST.Eq(eqrc)(s),
		fofc,
		fofaa,
		fofbc,
		fofabb,
		fmap,
		fapabb,
		fapabac,
		maa,
		mab,
		mac,
		mbc,
		ab,
		bc,
	)
}
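
// Usage sketch (illustrative only): the element types, the Eq constructors and
// the helper functions chosen below are assumptions for the example, not part
// of this package.
//
//	func TestStateReaderIOResultMonadLaws(t *testing.T) {
//		laws := AssertLaws[int, bool, string, int](t,
//			EQ.FromStrictEquals[int](),    // Eq for the state S
//			EQ.FromStrictEquals[bool](),   // Eq for A
//			EQ.FromStrictEquals[string](), // Eq for B
//			EQ.FromStrictEquals[int](),    // Eq for C
//			strconv.FormatBool,            // ab: A -> B
//			utf8.RuneCountInString,        // bc: B -> C
//			0,                             // initial state
//			context.Background(),
//		)
//		assert.True(t, laws(true))
//	}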
