1
0
mirror of https://github.com/IBM/fp-go.git synced 2026-03-10 13:31:01 +02:00

Compare commits

...

15 Commits

Author SHA1 Message Date
Dr. Carsten Leue
c6d30bb642 fix: increase test timeout
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 23:21:27 +01:00
Dr. Carsten Leue
1821f00fbe fix: introduce effect.LocalReaderK
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 22:52:20 +01:00
Dr. Carsten Leue
f0ec0b2541 fix: optimize record performance
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 22:20:19 +01:00
Dr. Carsten Leue
ce3c7d9359 fix: documentation of endomorphism
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 22:02:11 +01:00
Dr. Carsten Leue
3ed354cc8c fix: implement endomorphism.Read
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 19:01:22 +01:00
Dr. Carsten Leue
0932c8c464 fix: add tests for totality and move skills
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-08 14:12:41 +01:00
Dr. Carsten Leue
475d09e987 fix: add skills
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-07 22:39:33 +01:00
Dr. Carsten Leue
fd21bdeabf fix: signature of local for context/readerresult
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-07 22:03:17 +01:00
Dr. Carsten Leue
6834f72856 fix: make signature of Local for context more generic, but backwards compatible
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-07 21:02:24 +01:00
Dr. Carsten Leue
8cfb7ef659 fix: logging implementation for context sensitive operations
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-06 23:54:42 +01:00
Dr. Carsten Leue
622c87d734 fix: logging implementation for context sensitive operations
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-06 23:50:54 +01:00
Dr. Carsten Leue
2ce406a410 fix: add context7 badge
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-06 15:03:05 +01:00
Dr. Carsten Leue
3743361b9f fix: context7 at correct location
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-06 14:41:47 +01:00
Dr. Carsten Leue
69d11f0a4b fix: claim context7
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-06 14:39:58 +01:00
Dr. Carsten Leue
e4dd1169c4 fix: recursion in Errors()
Signed-off-by: Dr. Carsten Leue <carsten.leue@de.ibm.com>
2026-03-04 11:12:22 +01:00
45 changed files with 6023 additions and 377 deletions

4
context7.json Normal file
View File

@@ -0,0 +1,4 @@
{
"url": "https://context7.com/ibm/fp-go",
"public_key": "pk_7wJdJRn8zGHxvIYu7eh9h"
}

318
skills/fp-go-http/SKILL.md Normal file
View File

@@ -0,0 +1,318 @@
# fp-go HTTP Requests
## Overview
fp-go wraps `net/http` in the `ReaderIOResult` monad, giving you composable, context-aware HTTP operations with automatic error propagation. The core package is:
```
github.com/IBM/fp-go/v2/context/readerioresult/http
```
All HTTP operations are lazy — they describe what to do but do not execute until you call the resulting function with a `context.Context`.
## Core Types
```go
// Requester builds an *http.Request given a context.
type Requester = ReaderIOResult[*http.Request] // func(context.Context) func() result.Result[*http.Request]
// Client executes a Requester and returns the response wrapped in ReaderIOResult.
type Client interface {
Do(Requester) ReaderIOResult[*http.Response]
}
```
## Basic Usage
### 1. Create a Client
```go
import (
HTTP "net/http"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
)
client := H.MakeClient(HTTP.DefaultClient)
// Or with a custom client:
custom := &HTTP.Client{Timeout: 10 * time.Second}
client := H.MakeClient(custom)
```
### 2. Build a Request
```go
// GET request (most common)
req := H.MakeGetRequest("https://api.example.com/users/1")
// Arbitrary method + body
req := H.MakeRequest("POST", "https://api.example.com/users", bodyReader)
```
### 3. Execute and Parse
```go
import (
"context"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
)
type User struct {
ID int `json:"id"`
Name string `json:"name"`
}
client := H.MakeClient(HTTP.DefaultClient)
// ReadJSON validates status, Content-Type, then unmarshals JSON
result := H.ReadJSON[User](client)(H.MakeGetRequest("https://api.example.com/users/1"))
// Execute — provide context once
user, err := result(context.Background())()
```
## Response Readers
All accept a `Client` and return a function `Requester → ReaderIOResult[A]`:
| Function | Returns | Notes |
|----------|---------|-------|
| `ReadJSON[A](client)` | `ReaderIOResult[A]` | Validates status + Content-Type, unmarshals JSON |
| `ReadText(client)` | `ReaderIOResult[string]` | Validates status, reads body as UTF-8 string |
| `ReadAll(client)` | `ReaderIOResult[[]byte]` | Validates status, returns raw body bytes |
| `ReadFullResponse(client)` | `ReaderIOResult[FullResponse]` | Returns `Pair[*http.Response, []byte]` |
`FullResponse = Pair[*http.Response, []byte]` — use `pair.First` / `pair.Second` to access components.
## Composing Requests in Pipelines
```go
import (
F "github.com/IBM/fp-go/v2/function"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
IO "github.com/IBM/fp-go/v2/io"
)
client := H.MakeClient(HTTP.DefaultClient)
readPost := H.ReadJSON[Post](client)
pipeline := F.Pipe2(
H.MakeGetRequest("https://jsonplaceholder.typicode.com/posts/1"),
readPost,
RIO.ChainFirstIOK(IO.Logf[Post]("Got post: %v")),
)
post, err := pipeline(context.Background())()
```
## Parallel Requests — Homogeneous Types
Use `RIO.TraverseArray` when all requests return the same type:
```go
import (
A "github.com/IBM/fp-go/v2/array"
F "github.com/IBM/fp-go/v2/function"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
IO "github.com/IBM/fp-go/v2/io"
)
type PostItem struct {
UserID uint `json:"userId"`
ID uint `json:"id"`
Title string `json:"title"`
}
client := H.MakeClient(HTTP.DefaultClient)
readPost := H.ReadJSON[PostItem](client)
// Fetch 10 posts in parallel
data := F.Pipe3(
A.MakeBy(10, func(i int) string {
return fmt.Sprintf("https://jsonplaceholder.typicode.com/posts/%d", i+1)
}),
RIO.TraverseArray(F.Flow3(
H.MakeGetRequest,
readPost,
RIO.ChainFirstIOK(IO.Logf[PostItem]("Post: %v")),
)),
RIO.ChainFirstIOK(IO.Logf[[]PostItem]("All posts: %v")),
RIO.Map(A.Size[PostItem]),
)
count, err := data(context.Background())()
```
## Parallel Requests — Heterogeneous Types
Use `RIO.TraverseTuple2` (or `Tuple3`, etc.) when requests return different types:
```go
import (
T "github.com/IBM/fp-go/v2/tuple"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
F "github.com/IBM/fp-go/v2/function"
)
type CatFact struct {
Fact string `json:"fact"`
}
client := H.MakeClient(HTTP.DefaultClient)
readPost := H.ReadJSON[PostItem](client)
readCatFact := H.ReadJSON[CatFact](client)
// Execute both requests in parallel with different response types
data := F.Pipe3(
T.MakeTuple2(
"https://jsonplaceholder.typicode.com/posts/1",
"https://catfact.ninja/fact",
),
T.Map2(H.MakeGetRequest, H.MakeGetRequest), // build both requesters
RIO.TraverseTuple2(readPost, readCatFact), // run in parallel, typed
RIO.ChainFirstIOK(IO.Logf[T.Tuple2[PostItem, CatFact]]("Result: %v")),
)
both, err := data(context.Background())()
// both.F1 is PostItem, both.F2 is CatFact
```
## Building Requests with the Builder API
For complex requests (custom headers, query params, JSON body), use the builder:
```go
import (
B "github.com/IBM/fp-go/v2/http/builder"
RB "github.com/IBM/fp-go/v2/context/readerioresult/http/builder"
F "github.com/IBM/fp-go/v2/function"
)
// GET with query parameters
req := F.Pipe2(
B.Default,
B.WithURL("https://api.example.com/items?page=1"),
B.WithQueryArg("limit")("50"),
)
requester := RB.Requester(req)
// POST with JSON body
req := F.Pipe3(
B.Default,
B.WithURL("https://api.example.com/users"),
B.WithMethod("POST"),
B.WithJSON(map[string]string{"name": "Alice"}),
// sets Content-Type: application/json automatically
)
requester := RB.Requester(req)
// With authentication and custom headers
req := F.Pipe3(
B.Default,
B.WithURL("https://api.example.com/protected"),
B.WithBearer("my-token"), // sets Authorization: Bearer my-token
B.WithHeader("X-Request-ID")("123"),
)
requester := RB.Requester(req)
// Execute
result := H.ReadJSON[Response](client)(requester)
data, err := result(ctx)()
```
### Builder Functions
| Function | Effect |
|----------|--------|
| `B.WithURL(url)` | Set the target URL |
| `B.WithMethod(method)` | Set HTTP method (GET, POST, PUT, DELETE, …) |
| `B.WithJSON(v)` | Marshal `v` as JSON body, set `Content-Type: application/json` |
| `B.WithBytes(data)` | Set raw bytes body, set `Content-Length` automatically |
| `B.WithHeader(key)(value)` | Add a request header |
| `B.WithBearer(token)` | Set `Authorization: Bearer <token>` |
| `B.WithQueryArg(key)(value)` | Append a query parameter |
## Error Handling
Errors from request creation, HTTP status codes, Content-Type validation, and JSON parsing all propagate automatically through the `Result` monad. You only handle errors at the call site:
```go
// Pattern 1: direct extraction
value, err := pipeline(ctx)()
if err != nil { /* handle */ }
// Pattern 2: Fold for clean HTTP handler
RIO.Fold(
func(err error) { http.Error(w, err.Error(), http.StatusInternalServerError) },
func(data MyType) { json.NewEncoder(w).Encode(data) },
)(pipeline)(ctx)()
```
## Full HTTP Handler Example
```go
package main
import (
"context"
"encoding/json"
"net/http"
HTTP "net/http"
"fmt"
F "github.com/IBM/fp-go/v2/function"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
IO "github.com/IBM/fp-go/v2/io"
)
type Post struct {
ID int `json:"id"`
Title string `json:"title"`
}
var client = H.MakeClient(HTTP.DefaultClient)
func fetchPost(id int) RIO.ReaderIOResult[Post] {
url := fmt.Sprintf("https://jsonplaceholder.typicode.com/posts/%d", id)
return F.Pipe2(
H.MakeGetRequest(url),
H.ReadJSON[Post](client),
RIO.ChainFirstIOK(IO.Logf[Post]("fetched: %v")),
)
}
func handler(w http.ResponseWriter, r *http.Request) {
RIO.Fold(
func(err error) {
http.Error(w, err.Error(), http.StatusBadGateway)
},
func(post Post) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(post)
},
)(fetchPost(1))(r.Context())()
}
```
## Import Reference
```go
import (
HTTP "net/http"
H "github.com/IBM/fp-go/v2/context/readerioresult/http"
RB "github.com/IBM/fp-go/v2/context/readerioresult/http/builder"
B "github.com/IBM/fp-go/v2/http/builder"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
A "github.com/IBM/fp-go/v2/array"
T "github.com/IBM/fp-go/v2/tuple"
IO "github.com/IBM/fp-go/v2/io"
)
```
Requires Go 1.24+.

View File

@@ -0,0 +1,410 @@
# fp-go Logging
## Overview
fp-go provides logging utilities that integrate naturally with functional pipelines. Logging is always a **side effect** — it should not change the value being processed. The library achieves this through `ChainFirst`-style combinators that thread the original value through unchanged while performing the log.
## Packages
| Package | Purpose |
|---------|---------|
| `github.com/IBM/fp-go/v2/logging` | Global logger, context-embedded logger, `LoggingCallbacks` |
| `github.com/IBM/fp-go/v2/io` | `Logf`, `Logger`, `LogGo`, `Printf`, `PrintGo` — IO-level logging helpers |
| `github.com/IBM/fp-go/v2/readerio` | `SLog`, `SLogWithCallback` — structured logging for ReaderIO |
| `github.com/IBM/fp-go/v2/context/readerio` | `SLog`, `SLogWithCallback` — structured logging for context ReaderIO |
| `github.com/IBM/fp-go/v2/context/readerresult` | `SLog`, `TapSLog`, `SLogWithCallback` — structured logging for ReaderResult |
| `github.com/IBM/fp-go/v2/context/readerioresult` | `SLog`, `TapSLog`, `SLogWithCallback`, `LogEntryExit`, `LogEntryExitWithCallback` — full suite for ReaderIOResult |
## Logging Inside Pipelines
The idiomatic way to log inside a monadic pipeline is `ChainFirstIOK` (or `ChainFirst` where the monad is already IO). These combinators execute a side-effecting function and pass the **original value** downstream unchanged.
### With `IOResult` / `ReaderIOResult` — printf-style
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
IO "github.com/IBM/fp-go/v2/io"
F "github.com/IBM/fp-go/v2/function"
)
pipeline := F.Pipe3(
fetchUser(42),
RIO.ChainEitherK(validateUser),
// Log after validation — value flows through unchanged
RIO.ChainFirstIOK(IO.Logf[User]("Validated user: %v")),
RIO.Map(enrichUser),
)
```
`IO.Logf[A](format string) func(A) IO[A]` logs using `log.Printf` and returns the value unchanged. It's a Kleisli arrow suitable for `ChainFirst` and `ChainFirstIOK`.
### With `IOEither` / plain `IO`
```go
import (
IOE "github.com/IBM/fp-go/v2/ioeither"
IO "github.com/IBM/fp-go/v2/io"
F "github.com/IBM/fp-go/v2/function"
)
pipeline := F.Pipe3(
file.ReadFile("config.json"),
IOE.ChainEitherK(J.Unmarshal[Config]),
IOE.ChainFirstIOK(IO.Logf[Config]("Loaded config: %v")),
IOE.Map[error](processConfig),
)
```
### Logging Arrays in TraverseArray
```go
import (
A "github.com/IBM/fp-go/v2/array"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
IO "github.com/IBM/fp-go/v2/io"
F "github.com/IBM/fp-go/v2/function"
)
// Log each item individually, then log the final slice
pipeline := F.Pipe2(
A.MakeBy(3, idxToFilename),
RIO.TraverseArray(F.Flow3(
file.ReadFile,
RIO.ChainEitherK(J.Unmarshal[Record]),
RIO.ChainFirstIOK(IO.Logf[Record]("Parsed record: %v")),
)),
RIO.ChainFirstIOK(IO.Logf[[]Record]("All records: %v")),
)
```
## IO Logging Functions
All live in `github.com/IBM/fp-go/v2/io`:
### `Logf` — printf-style
```go
IO.Logf[A any](format string) func(A) IO[A]
```
Uses `log.Printf`. The format string works like `fmt.Sprintf`.
```go
IO.Logf[User]("Processing user: %+v")
IO.Logf[int]("Count: %d")
```
### `Logger` — with custom `*log.Logger`
```go
IO.Logger[A any](loggers ...*log.Logger) func(prefix string) func(A) IO[A]
```
Uses `logger.Printf(prefix+": %v", value)`. Pass your own `*log.Logger` instance.
```go
customLog := log.New(os.Stderr, "APP ", log.LstdFlags)
logUser := IO.Logger[User](customLog)("user")
// logs: "APP user: {ID:42 Name:Alice}"
```
### `LogGo` — Go template syntax
```go
IO.LogGo[A any](tmpl string) func(A) IO[A]
```
Uses Go's `text/template`. The template receives the value as `.`.
```go
type User struct{ Name string; Age int }
IO.LogGo[User]("User {{.Name}} is {{.Age}} years old")
```
### `Printf` / `PrintGo` — stdout instead of log
Same signatures as `Logf` / `LogGo` but use `fmt.Printf`/`fmt.Println` (no log prefix, no timestamp).
```go
IO.Printf[Result]("Result: %v\n")
IO.PrintGo[User]("Name: {{.Name}}")
```
## Structured Logging in the `context` Package
The `context/readerioresult`, `context/readerresult`, and `context/readerio` packages provide structured `slog`-based logging functions that are context-aware: they retrieve the logger from the context (via `logging.GetLoggerFromContext`) rather than using a fixed logger instance.
### `TapSLog` — inline structured logging in a ReaderIOResult pipeline
`TapSLog` is an **Operator** (`func(ReaderIOResult[A]) ReaderIOResult[A]`). It sits directly in a `F.Pipe` call on a `ReaderIOResult`, logs the current value or error using `slog`, and passes the result through unchanged.
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
)
pipeline := F.Pipe4(
fetchOrder(orderID),
RIO.TapSLog[Order]("Order fetched"), // logs value=<Order> or error=<err>
RIO.Chain(validateOrder),
RIO.TapSLog[Order]("Order validated"),
RIO.Chain(processPayment),
)
result, err := pipeline(ctx)()
```
- Logs **both** success values (`value=<A>`) and errors (`error=<err>`) using `slog` structured attributes.
- Respects the logger level — if the logger is configured to discard Info-level logs, nothing is written.
- Available in both `context/readerioresult` and `context/readerresult`.
### `SLog` — Kleisli-style structured logging
`SLog` is a **Kleisli arrow** (`func(Result[A]) ReaderResult[A]` / `func(Result[A]) ReaderIOResult[A]`). It is used with `Chain` when you want to intercept the raw `Result` directly.
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
)
pipeline := F.Pipe3(
fetchData(id),
RIO.Chain(RIO.SLog[Data]("Data fetched")), // log raw Result, pass it through
RIO.Chain(validateData),
RIO.Chain(RIO.SLog[Data]("Data validated")),
RIO.Chain(processData),
)
```
**Difference from `TapSLog`:**
- `TapSLog[A](msg)` is an `Operator[A, A]` — used directly in `F.Pipe` on a `ReaderIOResult[A]`.
- `SLog[A](msg)` is a `Kleisli[Result[A], A]` — used with `Chain`, giving access to the raw `Result[A]`.
Both log in the same format. `TapSLog` is more ergonomic in most pipelines.
### `SLogWithCallback` — custom log level and logger source
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
"log/slog"
)
// Log at DEBUG level with a custom logger extracted from context
debugLog := RIO.SLogWithCallback[User](
slog.LevelDebug,
logging.GetLoggerFromContext, // or any func(context.Context) *slog.Logger
"Fetched user",
)
pipeline := F.Pipe2(
fetchUser(123),
RIO.Chain(debugLog),
RIO.Map(func(u User) string { return u.Name }),
)
```
### `LogEntryExit` — automatic entry/exit timing with correlation IDs
`LogEntryExit` wraps a `ReaderIOResult` computation with structured entry and exit log messages. It assigns a unique **correlation ID** (`ID=<n>`) to each invocation so concurrent or nested operations can be correlated in logs.
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
)
pipeline := F.Pipe3(
fetchUser(123),
RIO.LogEntryExit[User]("fetchUser"), // wraps the operation
RIO.Chain(func(user User) RIO.ReaderIOResult[[]Order] {
return F.Pipe1(
fetchOrders(user.ID),
RIO.LogEntryExit[[]Order]("fetchOrders"),
)
}),
)
result, err := pipeline(ctx)()
// Logs:
// level=INFO msg="[entering]" name=fetchUser ID=1
// level=INFO msg="[exiting ]" name=fetchUser ID=1 duration=42ms
// level=INFO msg="[entering]" name=fetchOrders ID=2
// level=INFO msg="[exiting ]" name=fetchOrders ID=2 duration=18ms
```
On error, the exit log changes to `[throwing]` and includes the error:
```
level=INFO msg="[throwing]" name=fetchUser ID=3 duration=5ms error="user not found"
```
Key properties:
- **Correlation ID** (`ID=`) is unique per operation, monotonically increasing, and stored in the context so nested operations can access the parent's ID.
- **Duration** (`duration=`) is measured from entry to exit.
- **Logger is taken from the context** — embed a request-scoped logger with `logging.WithLogger` before executing the pipeline and `LogEntryExit` picks it up automatically.
- **Level-aware** — if the logger does not have the log level enabled, the entire entry/exit instrumentation is skipped (zero overhead).
- The original `ReaderIOResult[A]` value flows through **unchanged**.
```go
// Use a context logger so all log messages carry request metadata
cancelFn, ctxWithLogger := pair.Unpack(
logging.WithLogger(
slog.Default().With("requestID", r.Header.Get("X-Request-ID")),
)(r.Context()),
)
defer cancelFn()
result, err := pipeline(ctxWithLogger)()
```
### `LogEntryExitWithCallback` — custom log level
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
"log/slog"
)
// Log at DEBUG level instead of INFO
debugPipeline := F.Pipe1(
expensiveComputation(),
RIO.LogEntryExitWithCallback[Result](
slog.LevelDebug,
logging.GetLoggerFromContext,
"expensiveComputation",
),
)
```
### `SLog` / `SLogWithCallback` in `context/readerresult`
The same `SLog` and `TapSLog` functions are also available in `context/readerresult` for use with the synchronous `ReaderResult[A] = func(context.Context) (A, error)` monad:
```go
import RR "github.com/IBM/fp-go/v2/context/readerresult"
pipeline := F.Pipe3(
queryDB(id),
RR.TapSLog[Row]("Row fetched"),
RR.Chain(parseRow),
RR.TapSLog[Record]("Record parsed"),
)
```
## Global Logger (`logging` package)
The `logging` package manages a global `*slog.Logger` (structured logging, Go 1.21+).
```go
import "github.com/IBM/fp-go/v2/logging"
// Get the current global logger (defaults to slog.Default())
logger := logging.GetLogger()
logger.Info("application started", "version", "1.0")
// Replace the global logger; returns the old one for deferred restore
old := logging.SetLogger(slog.New(slog.NewJSONHandler(os.Stdout, nil)))
defer logging.SetLogger(old)
```
## Context-Embedded Logger
Embed a `*slog.Logger` in a `context.Context` to carry request-scoped loggers across the call stack. All context-package logging functions (`TapSLog`, `SLog`, `LogEntryExit`) pick up this logger automatically.
```go
import (
"github.com/IBM/fp-go/v2/logging"
"github.com/IBM/fp-go/v2/pair"
"log/slog"
)
// Create a request-scoped logger
reqLogger := slog.Default().With("requestID", "abc-123")
// Embed it into a context using the Kleisli arrow WithLogger
cancelFn, ctxWithLogger := pair.Unpack(logging.WithLogger(reqLogger)(ctx))
defer cancelFn()
// All downstream logging (TapSLog, LogEntryExit, etc.) uses reqLogger
result, err := pipeline(ctxWithLogger)()
```
`WithLogger` returns a `ContextCancel = Pair[context.CancelFunc, context.Context]`. The cancel function is a no-op — the context is only enriched, not made cancellable.
`GetLoggerFromContext` falls back to the global logger if no logger is found in the context.
## `LoggingCallbacks` — Dual-Logger Pattern
```go
import "github.com/IBM/fp-go/v2/logging"
// Returns (infoCallback, errorCallback) — both are func(string, ...any)
infoLog, errLog := logging.LoggingCallbacks() // use log.Default() for both
infoLog, errLog := logging.LoggingCallbacks(myLogger) // same logger for both
infoLog, errLog := logging.LoggingCallbacks(infoLog, errorLog) // separate loggers
```
Used internally by `io.Logger` and by packages that need separate info/error sinks.
## Choosing the Right Logging Function
| Situation | Use |
|-----------|-----|
| Quick printf logging mid-pipeline | `IO.Logf[A]("fmt")` with `ChainFirstIOK` |
| Go template formatting mid-pipeline | `IO.LogGo[A]("tmpl")` with `ChainFirstIOK` |
| Print to stdout (no log prefix) | `IO.Printf[A]("fmt")` with `ChainFirstIOK` |
| Structured slog — log value or error inline | `RIO.TapSLog[A]("msg")` (Operator, used in Pipe) |
| Structured slog — intercept raw Result | `RIO.Chain(RIO.SLog[A]("msg"))` (Kleisli) |
| Structured slog — custom log level | `RIO.SLogWithCallback[A](level, cb, "msg")` |
| Entry/exit timing + correlation IDs | `RIO.LogEntryExit[A]("name")` |
| Entry/exit at custom log level | `RIO.LogEntryExitWithCallback[A](level, cb, "name")` |
| Structured logging globally | `logging.GetLogger()` / `logging.SetLogger()` |
| Request-scoped logger in context | `logging.WithLogger(logger)` + `logging.GetLoggerFromContext(ctx)` |
| Custom `*log.Logger` in pipeline | `IO.Logger[A](logger)("prefix")` with `ChainFirstIOK` |
## Complete Example
```go
package main
import (
"context"
"log/slog"
"os"
F "github.com/IBM/fp-go/v2/function"
IO "github.com/IBM/fp-go/v2/io"
L "github.com/IBM/fp-go/v2/logging"
P "github.com/IBM/fp-go/v2/pair"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
)
func main() {
// Configure JSON structured logging globally
L.SetLogger(slog.New(slog.NewJSONHandler(os.Stdout, nil)))
// Embed a request-scoped logger into the context
_, ctx := P.Unpack(L.WithLogger(
L.GetLogger().With("requestID", "req-001"),
)(context.Background()))
pipeline := F.Pipe5(
fetchData(42),
RIO.LogEntryExit[Data]("fetchData"), // entry/exit with timing + ID
RIO.TapSLog[Data]("raw data"), // inline structured value log
RIO.ChainEitherK(transformData),
RIO.LogEntryExit[Result]("transformData"),
RIO.ChainFirstIOK(IO.LogGo[Result]("result: {{.Value}}")), // template log
)
value, err := pipeline(ctx)()
if err != nil {
L.GetLogger().Error("pipeline failed", "error", err)
}
_ = value
}
```

View File

@@ -0,0 +1,520 @@
# fp-go Monadic Operations
## Overview
`fp-go` (import path `github.com/IBM/fp-go/v2`) brings type-safe functional programming to Go using generics. Every monad follows a **consistent interface**: once you know the pattern in one monad, it transfers to all others.
All functions use the **data-last** principle: the data being transformed is always the last argument, enabling partial application and pipeline composition.
## Core Types
| Type | Package | Represents |
|------|---------|------------|
| `Option[A]` | `option` | A value that may or may not be present (replaces nil) |
| `Either[E, A]` | `either` | A value that is either a left error `E` or a right success `A` |
| `Result[A]` | `result` | `Either[error, A]` — shorthand for the common case |
| `IO[A]` | `io` | A lazy computation that produces `A` (possibly with side effects) |
| `IOResult[A]` | `ioresult` | `IO[Result[A]]` — lazy computation that can fail |
| `ReaderIOResult[A]` | `context/readerioresult` | `func(context.Context) IOResult[A]` — context-aware IO with errors |
| `Effect[C, A]` | `effect` | `func(C) ReaderIOResult[A]` — typed dependency injection + IO + errors |
Idiomatic (high-performance, tuple-based) equivalents live in `idiomatic/`:
- `idiomatic/option` → `(A, bool)` tuples
- `idiomatic/result` → `(A, error)` tuples
- `idiomatic/ioresult` → `func() (A, error)`
- `idiomatic/context/readerresult` → `func(context.Context) (A, error)`
## Standard Operations
Every monad exports these operations (PascalCase for exported Go names):
| fp-go | fp-ts / Haskell | Description |
|-------|----------------|-------------|
| `Of` | `of` / `pure` | Lift a pure value into the monad |
| `Map` | `map` / `fmap` | Transform the value inside without changing the context |
| `Chain` | `chain` / `>>=` | Sequence a computation that itself returns a monadic value |
| `Ap` | `ap` / `<*>` | Apply a wrapped function to a wrapped value |
| `Fold` | `fold` / `either` | Eliminate the context — handle every case and extract a plain value |
| `GetOrElse` | `getOrElse` / `fromMaybe` | Extract the value or use a default (Option/Result) |
| `Filter` | `filter` / `mfilter` | Keep only values satisfying a predicate |
| `Flatten` | `flatten` / `join` | Remove one level of nesting (`M[M[A]]` → `M[A]`) |
| `ChainFirst` | `chainFirst` / `>>` | Sequence for side effects; keeps the original value |
| `Alt` | `alt` / `<\|>` | Provide an alternative when the first computation fails |
| `FromPredicate` | `fromPredicate` / `guard` | Build a monadic value from a predicate |
| `Sequence` | `sequence` | Turn `[]M[A]` into `M[[]A]` |
| `Traverse` | `traverse` | Map and sequence in one step |
Curried (composable) vs. monadic (direct) form:
```go
// Curried — data last, returns a transformer function
option.Map(strings.ToUpper) // func(Option[string]) Option[string]
// Monadic — data first, immediate execution
option.MonadMap(option.Some("hello"), strings.ToUpper)
```
Use curried form for pipelines; use `Monad*` form when you already have all arguments.
## Key Type Aliases (defined per monad)
```go
// A Kleisli arrow: a function from A to a monadic B
type Kleisli[A, B any] = func(A) M[B]
// An operator: transforms one monadic value into another
type Operator[A, B any] = func(M[A]) M[B]
```
`Chain` takes a `Kleisli`, `Map` returns an `Operator`. The naming is consistent across all monads.
## Examples
### Option — nullable values without nil
```go
import (
O "github.com/IBM/fp-go/v2/option"
F "github.com/IBM/fp-go/v2/function"
"strconv"
)
parseAndDouble := F.Flow2(
O.FromPredicate(func(s string) bool { return s != "" }),
O.Chain(func(s string) O.Option[int] {
n, err := strconv.Atoi(s)
if err != nil {
return O.None[int]()
}
return O.Some(n * 2)
}),
)
parseAndDouble("21") // Some(42)
parseAndDouble("") // None
parseAndDouble("abc") // None
```
### Result — error handling without if-err boilerplate
```go
import (
R "github.com/IBM/fp-go/v2/result"
F "github.com/IBM/fp-go/v2/function"
"strconv"
"errors"
)
parse := R.Eitherize1(strconv.Atoi) // lifts (int, error) → Result[int]
validate := func(n int) R.Result[int] {
if n < 0 {
return R.Error[int](errors.New("must be non-negative"))
}
return R.Of(n)
}
pipeline := F.Flow2(parse, R.Chain(validate))
pipeline("42") // Ok(42)
pipeline("-1") // Error("must be non-negative")
pipeline("abc") // Error(strconv parse error)
```
### IOResult — lazy IO with error handling
```go
import (
IOE "github.com/IBM/fp-go/v2/ioresult"
F "github.com/IBM/fp-go/v2/function"
J "github.com/IBM/fp-go/v2/json"
"os"
)
readConfig := F.Flow2(
IOE.Eitherize1(os.ReadFile), // func(string) IOResult[[]byte]
IOE.ChainEitherK(J.Unmarshal[Config]), // parse JSON, propagate errors
)
result := readConfig("config.json")() // execute lazily
```
### ReaderIOResult — context-aware pipelines (recommended for services)
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
"context"
)
// type ReaderIOResult[A any] = func(context.Context) func() result.Result[A]
fetchUser := func(id int) RIO.ReaderIOResult[User] {
return func(ctx context.Context) func() result.Result[User] {
return func() result.Result[User] {
// perform IO here
}
}
}
pipeline := F.Pipe3(
fetchUser(42),
RIO.ChainEitherK(validateUser), // lift pure (User, error) function
RIO.Map(enrichUser), // lift pure User → User function
RIO.ChainFirstIOK(IO.Logf[User]("Fetched: %v")), // side-effect logging
)
user, err := pipeline(ctx)() // provide context once, execute
```
### Traversal — process slices monadically
```go
import (
A "github.com/IBM/fp-go/v2/array"
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
)
// Fetch all users, stop on first error
fetchAll := F.Pipe1(
A.MakeBy(10, userID),
RIO.TraverseArray(fetchUser), // []ReaderIOResult[User] → ReaderIOResult[[]User]
)
```
## Function Composition with Flow and Pipe
```go
import F "github.com/IBM/fp-go/v2/function"
// Flow: compose functions left-to-right, returns a new function
transform := F.Flow3(
option.Map(strings.TrimSpace),
option.Filter(func(s string) bool { return s != "" }),
option.GetOrElse(func() string { return "default" }),
)
result := transform(option.Some(" hello ")) // "hello"
// Pipe: apply a value through a pipeline immediately
result := F.Pipe3(
option.Some(" hello "),
option.Map(strings.TrimSpace),
option.Filter(func(s string) bool { return s != "" }),
option.GetOrElse(func() string { return "default" }),
)
```
## Lifting Pure Functions into Monadic Context
fp-go provides helpers to promote non-monadic functions:
| Helper | Lifts |
|--------|-------|
| `ChainEitherK` | `func(A) (B, error)` → works inside the monad |
| `ChainOptionK` | `func(A) Option[B]` → works inside the monad |
| `ChainFirstIOK` | `func(A) IO[B]` for side effects, keeps original value |
| `Eitherize1..N` | `func(A) (B, error)` → `func(A) Result[B]` |
| `FromPredicate` | `func(A) bool` + error builder → `func(A) Result[A]` |
## Type Parameter Ordering Rule (V2)
Non-inferrable type parameters come **first**, so the compiler can infer the rest:
```go
// B cannot be inferred from the argument — it comes first
result := either.Ap[string](value)(funcInEither)
// All types inferrable — no explicit params needed
result := either.Map(transform)(value)
result := either.Chain(validator)(value)
```
## When to Use Which Monad
| Situation | Use |
|-----------|-----|
| Value that might be absent | `Option[A]` |
| Operation that can fail with custom error type | `Either[E, A]` |
| Operation that can fail with `error` | `Result[A]` |
| Lazy IO, side effects | `IO[A]` |
| IO that can fail | `IOResult[A]` |
| IO + context (cancellation, deadlines) | `ReaderIOResult[A]` from `context/readerioresult` |
| IO + context + typed dependencies | `Effect[C, A]` |
| High-performance services | Idiomatic packages in `idiomatic/` |
## Do-Notation: Accumulating State with `Bind` and `ApS`
When a pipeline needs to carry **multiple intermediate results** forward — not just a single value — the `Chain`/`Map` style becomes unwieldy because each step only threads one value and prior results are lost. Do-notation solves this by accumulating results into a growing struct (the "state") at each step.
Every monad that supports do-notation exports the same family of functions. The examples below use `context/readerioresult` (`RIO`), but the identical API is available in `result`, `option`, `ioresult`, `readerioresult`, and others.
### The Function Family
| Function | Kind | What it does |
|----------|------|-------------|
| `Do(empty S)` | — | Lift an empty struct into the monad; starting point |
| `BindTo(setter)` | monadic | Convert an existing `M[T]` into `M[S]`; alternative start |
| `Bind(setter, f)` | monadic | Add a result; `f` receives the **current state** and returns `M[T]` |
| `ApS(setter, fa)` | applicative | Add a result; `fa` is **independent** of the current state |
| `Let(setter, f)` | pure | Add a value computed by a **pure function** of the state |
| `LetTo(setter, value)` | pure | Add a **constant** value |
Lens variants (`BindL`, `ApSL`, `LetL`, `LetToL`) accept a `Lens[S, T]` instead of a manual setter, integrating naturally with the optics system.
### `Bind` — Sequential, Dependent Steps
`Bind` sequences two monadic computations. The function `f` receives the **full accumulated state** so it can read anything gathered so far. Errors short-circuit automatically.
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
L "github.com/IBM/fp-go/v2/optics/lens"
"context"
)
type Pipeline struct {
User User
Config Config
Posts []Post
}
// Lenses — focus on individual fields; .Set is already func(T) func(S) S
var (
userLens = L.MakeLens(func(s Pipeline) User { return s.User }, func(s Pipeline, u User) Pipeline { s.User = u; return s })
configLens = L.MakeLens(func(s Pipeline) Config { return s.Config }, func(s Pipeline, c Config) Pipeline { s.Config = c; return s })
postsLens = L.MakeLens(func(s Pipeline) []Post { return s.Posts }, func(s Pipeline, p []Post) Pipeline { s.Posts = p; return s })
)
result := F.Pipe3(
RIO.Do(Pipeline{}), // lift empty struct
RIO.Bind(userLens.Set, func(_ Pipeline) RIO.ReaderIOResult[User] { return fetchUser(42) }),
RIO.Bind(configLens.Set, F.Flow2(userLens.Get, fetchConfigForUser)), // read s.User, pass to fetcher
RIO.Bind(postsLens.Set, F.Flow2(userLens.Get, fetchPostsForUser)), // read s.User, pass to fetcher
)
pipeline, err := result(context.Background())()
// pipeline.User, pipeline.Config, pipeline.Posts are all populated
```
The setter signature is `func(T) func(S1) S2` — it takes the new value and returns a state transformer. `lens.Set` already has this shape, so no manual setter functions are needed. `F.Flow2(lens.Get, f)` composes the field getter with any Kleisli arrow `f` point-free.
### `ApS` — Independent, Applicative Steps
`ApS` uses **applicative** semantics: `fa` is evaluated without any access to the current state. Use it when steps have no dependency on each other — the library can choose to execute them concurrently.
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
L "github.com/IBM/fp-go/v2/optics/lens"
)
type Summary struct {
User User
Weather Weather
}
var (
userLens = L.MakeLens(func(s Summary) User { return s.User }, func(s Summary, u User) Summary { s.User = u; return s })
weatherLens = L.MakeLens(func(s Summary) Weather { return s.Weather }, func(s Summary, w Weather) Summary { s.Weather = w; return s })
)
// Both are independent — neither needs the other's result
result := F.Pipe2(
RIO.Do(Summary{}),
RIO.ApS(userLens.Set, fetchUser(42)),
RIO.ApS(weatherLens.Set, fetchWeather("NYC")),
)
```
**Key difference from `Bind`:**
| | `Bind(setter, f)` | `ApS(setter, fa)` |
|-|---|---|
| Second argument | `func(S1) M[T]` — a **function** of state | `M[T]` — a **fixed** monadic value |
| Can read prior state? | Yes — receives `S1` | No — no access to state |
| Semantics | Monadic (sequential) | Applicative (independent) |
### `Let` and `LetTo` — Pure Additions
`Let` adds a value computed by a **pure function** of the current state (no monad, cannot fail):
```go
import (
RIO "github.com/IBM/fp-go/v2/context/readerioresult"
F "github.com/IBM/fp-go/v2/function"
L "github.com/IBM/fp-go/v2/optics/lens"
)
type Enriched struct {
User User
FullName string
}
var (
userLens = L.MakeLens(func(s Enriched) User { return s.User }, func(s Enriched, u User) Enriched { s.User = u; return s })
fullNameLens = L.MakeLens(func(s Enriched) string { return s.FullName }, func(s Enriched, n string) Enriched { s.FullName = n; return s })
)
fullName := func(u User) string { return u.FirstName + " " + u.LastName }
result := F.Pipe2(
RIO.Do(Enriched{}),
RIO.Bind(userLens.Set, func(_ Enriched) RIO.ReaderIOResult[User] { return fetchUser(42) }),
RIO.Let(fullNameLens.Set, F.Flow2(userLens.Get, fullName)), // read s.User, compute pure string
)
```
`LetTo` adds a **constant** with no computation:
```go
RIO.LetTo(setVersion, "v1.2.3")
```
### `BindTo` — Starting from an Existing Value
When you have an existing `M[T]` and want to project it into a state struct rather than starting from `Do(empty)`:
```go
type State struct{ User User }
result := F.Pipe1(
fetchUser(42), // ReaderIOResult[User]
RIO.BindTo(func(u User) State { return State{User: u} }), // ReaderIOResult[State]
)
```
### Lens Variants (`ApSL`, `BindL`, `LetL`, `LetToL`)
If you have a `Lens[S, T]` (from the optics system or code generation), you can skip writing the setter function entirely:
```go
import (
RO "github.com/IBM/fp-go/v2/readeroption"
F "github.com/IBM/fp-go/v2/function"
)
// Lenses generated by go:generate (see optics/README.md)
// personLenses.Name : Lens[*Person, Name]
// personLenses.Age : Lens[*Person, Age]
makePerson := F.Pipe2(
RO.Do[*PartialPerson](emptyPerson),
RO.ApSL(personLenses.Name, maybeName), // replaces: ApS(personLenses.Name.Set, maybeName)
RO.ApSL(personLenses.Age, maybeAge),
)
```
This exact pattern is used in [`samples/builder`](samples/builder/builder.go) to validate and construct a `Person` from an unvalidated `PartialPerson`.
### Lifted Variants for Mixed Monads
`context/readerioresult` provides `Bind*K` helpers that lift simpler computations directly into the do-chain:
| Helper | Lifts |
|--------|-------|
| `BindResultK` / `BindEitherK` | `func(S1) (T, error)` — pure result |
| `BindIOResultK` / `BindIOEitherK` | `func(S1) func() (T, error)` — lazy IO result |
| `BindIOK` | `func(S1) func() T` — infallible IO |
| `BindReaderK` | `func(S1) func(ctx) T` — context reader |
```go
RIO.BindResultK(setUser, func(s Pipeline) (User, error) {
return validateAndBuild(s) // plain (value, error) function, no wrapping needed
})
```
### Decision Guide
```
Does the new step need to read prior accumulated state?
YES → Bind (monadic, sequential; f receives current S)
NO → ApS (applicative, independent; fa is a fixed M[T])
Is the new value derived purely from state, with no monad?
YES → Let (pure function of S)
Is the new value a compile-time or runtime constant?
YES → LetTo
Starting from an existing M[T] rather than an empty struct?
YES → BindTo
```
### Complete Example — `result` Monad
The same pattern works with simpler monads. Here with `result.Result[A]`:
`Eitherize1` converts any standard `func(A) (B, error)` into `func(A) Result[B]`. Define these lifted functions once as variables. Then use lenses to focus on individual struct fields and compose with `F.Flow2(lens.Get, f)` — no inline lambdas, no manual error handling.
```go
import (
R "github.com/IBM/fp-go/v2/result"
F "github.com/IBM/fp-go/v2/function"
L "github.com/IBM/fp-go/v2/optics/lens"
N "github.com/IBM/fp-go/v2/number"
"strconv"
)
type Parsed struct {
Raw string
Number int
Double int
}
// Lenses — focus on individual fields of Parsed.
var (
rawLens = L.MakeLens(
func(s Parsed) string { return s.Raw },
func(s Parsed, v string) Parsed { s.Raw = v; return s },
)
numberLens = L.MakeLens(
func(s Parsed) int { return s.Number },
func(s Parsed, v int) Parsed { s.Number = v; return s },
)
doubleLens = L.MakeLens(
func(s Parsed) int { return s.Double },
func(s Parsed, v int) Parsed { s.Double = v; return s },
)
)
// Lifted functions — convert standard (value, error) functions into Result-returning ones.
var (
atoi = R.Eitherize1(strconv.Atoi) // func(string) Result[int]
)
parse := func(input string) R.Result[Parsed] {
return F.Pipe3(
R.Do(Parsed{}),
R.LetTo(rawLens.Set, input), // set Raw to constant input
R.Bind(numberLens.Set, F.Flow2(rawLens.Get, atoi)), // get Raw, parse → Result[int]
R.Let(doubleLens.Set, F.Flow2(numberLens.Get, N.Mul(2))), // get Number, multiply → int
)
}
parse("21") // Ok(Parsed{Raw:"21", Number:21, Double:42})
parse("abc") // Error(strconv parse error)
```
`rawLens.Set` is already `func(string) func(Parsed) Parsed`, matching the setter signature `Bind` and `LetTo` expect — no manual setter functions to write. `F.Flow2(rawLens.Get, atoi)` composes the field getter with the eitherized parse function into a `Kleisli[Parsed, int]` without any intermediate lambda.
## Import Paths
```go
import (
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/result"
"github.com/IBM/fp-go/v2/either"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioresult"
"github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/effect"
F "github.com/IBM/fp-go/v2/function"
A "github.com/IBM/fp-go/v2/array"
)
```
Requires Go 1.24+ (generic type aliases).

View File

@@ -3,6 +3,7 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/fp-go/v2.svg)](https://pkg.go.dev/github.com/IBM/fp-go/v2)
[![Coverage Status](https://coveralls.io/repos/github/IBM/fp-go/badge.svg?branch=main&flag=v2)](https://coveralls.io/github/IBM/fp-go?branch=main)
[![Go Report Card](https://goreportcard.com/badge/github.com/IBM/fp-go/v2)](https://goreportcard.com/report/github.com/IBM/fp-go/v2)
[![Context7](https://img.shields.io/badge/context7-docs-blue)](https://context7.com/ibm/fp-go)
**fp-go** is a comprehensive functional programming library for Go, bringing type-safe functional patterns inspired by [fp-ts](https://gcanti.github.io/fp-ts/) to the Go ecosystem. Version 2 leverages [generic type aliases](https://github.com/golang/go/issues/46477) introduced in Go 1.24, providing a more ergonomic and streamlined API.

522
v2/array/array_nil_test.go Normal file
View File

@@ -0,0 +1,522 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package array
import (
"fmt"
"testing"
O "github.com/IBM/fp-go/v2/option"
P "github.com/IBM/fp-go/v2/pair"
S "github.com/IBM/fp-go/v2/string"
"github.com/stretchr/testify/assert"
)
// TestNilSlice_IsEmpty verifies that IsEmpty handles nil slices correctly
func TestNilSlice_IsEmpty(t *testing.T) {
	s := []int(nil)
	assert.True(t, IsEmpty(s), "nil slice should be empty")
}
// TestNilSlice_IsNonEmpty verifies that IsNonEmpty handles nil slices correctly
func TestNilSlice_IsNonEmpty(t *testing.T) {
	s := []int(nil)
	assert.False(t, IsNonEmpty(s), "nil slice should not be non-empty")
}
// TestNilSlice_MonadMap verifies that MonadMap handles nil slices correctly
func TestNilSlice_MonadMap(t *testing.T) {
	s := []int(nil)
	toString := func(v int) string { return fmt.Sprintf("%d", v) }
	res := MonadMap(s, toString)
	assert.NotNil(t, res, "MonadMap should return non-nil slice")
	assert.Len(t, res, 0, "MonadMap should return empty slice for nil input")
}
// TestNilSlice_MonadMapRef verifies that MonadMapRef handles nil slices correctly
func TestNilSlice_MonadMapRef(t *testing.T) {
	s := []int(nil)
	toString := func(v *int) string { return fmt.Sprintf("%d", *v) }
	res := MonadMapRef(s, toString)
	assert.NotNil(t, res, "MonadMapRef should return non-nil slice")
	assert.Len(t, res, 0, "MonadMapRef should return empty slice for nil input")
}
// TestNilSlice_Map verifies that Map handles nil slices correctly
func TestNilSlice_Map(t *testing.T) {
	s := []int(nil)
	res := Map(func(v int) string { return fmt.Sprintf("%d", v) })(s)
	assert.NotNil(t, res, "Map should return non-nil slice")
	assert.Len(t, res, 0, "Map should return empty slice for nil input")
}
// TestNilSlice_MapRef verifies that MapRef handles nil slices correctly
func TestNilSlice_MapRef(t *testing.T) {
	s := []int(nil)
	res := MapRef(func(v *int) string { return fmt.Sprintf("%d", *v) })(s)
	assert.NotNil(t, res, "MapRef should return non-nil slice")
	assert.Len(t, res, 0, "MapRef should return empty slice for nil input")
}
// TestNilSlice_MapWithIndex verifies that MapWithIndex handles nil slices correctly
func TestNilSlice_MapWithIndex(t *testing.T) {
	s := []int(nil)
	res := MapWithIndex(func(i, v int) string { return fmt.Sprintf("%d:%d", i, v) })(s)
	assert.NotNil(t, res, "MapWithIndex should return non-nil slice")
	assert.Len(t, res, 0, "MapWithIndex should return empty slice for nil input")
}
// TestNilSlice_Filter verifies that Filter handles nil slices correctly
func TestNilSlice_Filter(t *testing.T) {
	s := []int(nil)
	isPositive := func(v int) bool { return v > 0 }
	res := Filter(isPositive)(s)
	assert.NotNil(t, res, "Filter should return non-nil slice")
	assert.Len(t, res, 0, "Filter should return empty slice for nil input")
}
// TestNilSlice_FilterWithIndex verifies that FilterWithIndex handles nil slices correctly
func TestNilSlice_FilterWithIndex(t *testing.T) {
	s := []int(nil)
	res := FilterWithIndex(func(_, v int) bool { return v > 0 })(s)
	assert.NotNil(t, res, "FilterWithIndex should return non-nil slice")
	assert.Len(t, res, 0, "FilterWithIndex should return empty slice for nil input")
}
// TestNilSlice_FilterRef verifies that FilterRef handles nil slices correctly
func TestNilSlice_FilterRef(t *testing.T) {
	s := []int(nil)
	res := FilterRef(func(v *int) bool { return *v > 0 })(s)
	assert.NotNil(t, res, "FilterRef should return non-nil slice")
	assert.Len(t, res, 0, "FilterRef should return empty slice for nil input")
}
// TestNilSlice_MonadFilterMap verifies that MonadFilterMap handles nil slices correctly
func TestNilSlice_MonadFilterMap(t *testing.T) {
var nilSlice []int
result := MonadFilterMap(nilSlice, func(v int) O.Option[string] {
return O.Some(fmt.Sprintf("%d", v))
})
assert.NotNil(t, result, "MonadFilterMap should return non-nil slice")
assert.Equal(t, 0, len(result), "MonadFilterMap should return empty slice for nil input")
}
// TestNilSlice_MonadFilterMapWithIndex verifies that MonadFilterMapWithIndex handles nil slices correctly
func TestNilSlice_MonadFilterMapWithIndex(t *testing.T) {
var nilSlice []int
result := MonadFilterMapWithIndex(nilSlice, func(i int, v int) O.Option[string] {
return O.Some(fmt.Sprintf("%d:%d", i, v))
})
assert.NotNil(t, result, "MonadFilterMapWithIndex should return non-nil slice")
assert.Equal(t, 0, len(result), "MonadFilterMapWithIndex should return empty slice for nil input")
}
// TestNilSlice_FilterMap verifies that FilterMap handles nil slices correctly
func TestNilSlice_FilterMap(t *testing.T) {
	s := []int(nil)
	res := FilterMap(func(v int) O.Option[string] { return O.Some(fmt.Sprintf("%d", v)) })(s)
	assert.NotNil(t, res, "FilterMap should return non-nil slice")
	assert.Len(t, res, 0, "FilterMap should return empty slice for nil input")
}
// TestNilSlice_FilterMapWithIndex verifies that FilterMapWithIndex handles nil slices correctly
func TestNilSlice_FilterMapWithIndex(t *testing.T) {
	s := []int(nil)
	res := FilterMapWithIndex(func(i, v int) O.Option[string] { return O.Some(fmt.Sprintf("%d:%d", i, v)) })(s)
	assert.NotNil(t, res, "FilterMapWithIndex should return non-nil slice")
	assert.Len(t, res, 0, "FilterMapWithIndex should return empty slice for nil input")
}
// TestNilSlice_MonadReduce verifies that MonadReduce handles nil slices correctly
func TestNilSlice_MonadReduce(t *testing.T) {
var nilSlice []int
result := MonadReduce(nilSlice, func(acc int, v int) int {
return acc + v
}, 10)
assert.Equal(t, 10, result, "MonadReduce should return initial value for nil slice")
}
// TestNilSlice_MonadReduceWithIndex verifies that MonadReduceWithIndex handles nil slices correctly
func TestNilSlice_MonadReduceWithIndex(t *testing.T) {
var nilSlice []int
result := MonadReduceWithIndex(nilSlice, func(i int, acc int, v int) int {
return acc + v
}, 10)
assert.Equal(t, 10, result, "MonadReduceWithIndex should return initial value for nil slice")
}
// TestNilSlice_Reduce verifies that Reduce handles nil slices correctly
func TestNilSlice_Reduce(t *testing.T) {
var nilSlice []int
reducer := Reduce(func(acc int, v int) int {
return acc + v
}, 10)
result := reducer(nilSlice)
assert.Equal(t, 10, result, "Reduce should return initial value for nil slice")
}
// TestNilSlice_ReduceWithIndex verifies that ReduceWithIndex handles nil slices correctly
func TestNilSlice_ReduceWithIndex(t *testing.T) {
var nilSlice []int
reducer := ReduceWithIndex(func(i int, acc int, v int) int {
return acc + v
}, 10)
result := reducer(nilSlice)
assert.Equal(t, 10, result, "ReduceWithIndex should return initial value for nil slice")
}
// TestNilSlice_ReduceRight verifies that ReduceRight handles nil slices correctly
func TestNilSlice_ReduceRight(t *testing.T) {
var nilSlice []int
reducer := ReduceRight(func(v int, acc int) int {
return acc + v
}, 10)
result := reducer(nilSlice)
assert.Equal(t, 10, result, "ReduceRight should return initial value for nil slice")
}
// TestNilSlice_ReduceRightWithIndex verifies that ReduceRightWithIndex handles nil slices correctly
func TestNilSlice_ReduceRightWithIndex(t *testing.T) {
var nilSlice []int
reducer := ReduceRightWithIndex(func(i int, v int, acc int) int {
return acc + v
}, 10)
result := reducer(nilSlice)
assert.Equal(t, 10, result, "ReduceRightWithIndex should return initial value for nil slice")
}
// TestNilSlice_ReduceRef verifies that ReduceRef handles nil slices correctly
func TestNilSlice_ReduceRef(t *testing.T) {
var nilSlice []int
reducer := ReduceRef(func(acc int, v *int) int {
return acc + *v
}, 10)
result := reducer(nilSlice)
assert.Equal(t, 10, result, "ReduceRef should return initial value for nil slice")
}
// TestNilSlice_Append verifies that Append handles nil slices correctly
func TestNilSlice_Append(t *testing.T) {
	s := []int(nil)
	res := Append(s, 42)
	assert.NotNil(t, res, "Append should return non-nil slice")
	assert.Len(t, res, 1, "Append should create slice with one element")
	assert.Equal(t, 42, res[0], "Append should add element correctly")
}
// TestNilSlice_MonadChain verifies that MonadChain handles nil slices correctly
func TestNilSlice_MonadChain(t *testing.T) {
	s := []int(nil)
	toSingleton := func(v int) []string { return []string{fmt.Sprintf("%d", v)} }
	res := MonadChain(s, toSingleton)
	assert.NotNil(t, res, "MonadChain should return non-nil slice")
	assert.Len(t, res, 0, "MonadChain should return empty slice for nil input")
}
// TestNilSlice_Chain verifies that Chain handles nil slices correctly
func TestNilSlice_Chain(t *testing.T) {
	s := []int(nil)
	res := Chain(func(v int) []string { return []string{fmt.Sprintf("%d", v)} })(s)
	assert.NotNil(t, res, "Chain should return non-nil slice")
	assert.Len(t, res, 0, "Chain should return empty slice for nil input")
}
// TestNilSlice_MonadAp verifies that MonadAp handles nil slices correctly
func TestNilSlice_MonadAp(t *testing.T) {
	var fs []func(int) string
	vs := []int(nil)
	// nil functions, nil values
	r1 := MonadAp(fs, vs)
	assert.NotNil(t, r1, "MonadAp should return non-nil slice")
	assert.Len(t, r1, 0, "MonadAp should return empty slice for nil inputs")
	// nil functions, non-nil values
	r2 := MonadAp(fs, []int{1, 2, 3})
	assert.NotNil(t, r2, "MonadAp should return non-nil slice")
	assert.Len(t, r2, 0, "MonadAp should return empty slice when functions are nil")
	// non-nil functions, nil values
	toString := func(v int) string { return fmt.Sprintf("%d", v) }
	r3 := MonadAp([]func(int) string{toString}, vs)
	assert.NotNil(t, r3, "MonadAp should return non-nil slice")
	assert.Len(t, r3, 0, "MonadAp should return empty slice when values are nil")
}
// TestNilSlice_Ap verifies that Ap handles nil slices correctly
func TestNilSlice_Ap(t *testing.T) {
	vs := []int(nil)
	var fs []func(int) string
	res := Ap[string](vs)(fs)
	assert.NotNil(t, res, "Ap should return non-nil slice")
	assert.Len(t, res, 0, "Ap should return empty slice for nil inputs")
}
// TestNilSlice_Head verifies that Head handles nil slices correctly
func TestNilSlice_Head(t *testing.T) {
var nilSlice []int
result := Head(nilSlice)
assert.True(t, O.IsNone(result), "Head should return None for nil slice")
}
// TestNilSlice_First verifies that First handles nil slices correctly
func TestNilSlice_First(t *testing.T) {
var nilSlice []int
result := First(nilSlice)
assert.True(t, O.IsNone(result), "First should return None for nil slice")
}
// TestNilSlice_Last verifies that Last handles nil slices correctly
func TestNilSlice_Last(t *testing.T) {
var nilSlice []int
result := Last(nilSlice)
assert.True(t, O.IsNone(result), "Last should return None for nil slice")
}
// TestNilSlice_Tail verifies that Tail handles nil slices correctly
func TestNilSlice_Tail(t *testing.T) {
var nilSlice []int
result := Tail(nilSlice)
assert.True(t, O.IsNone(result), "Tail should return None for nil slice")
}
// TestNilSlice_Flatten verifies that Flatten handles nil slices correctly
func TestNilSlice_Flatten(t *testing.T) {
	s := [][]int(nil)
	res := Flatten(s)
	assert.NotNil(t, res, "Flatten should return non-nil slice")
	assert.Len(t, res, 0, "Flatten should return empty slice for nil input")
}
// TestNilSlice_Lookup verifies that Lookup handles nil slices correctly
func TestNilSlice_Lookup(t *testing.T) {
	s := []int(nil)
	res := Lookup[int](0)(s)
	assert.True(t, O.IsNone(res), "Lookup should return None for nil slice")
}
// TestNilSlice_Size verifies that Size handles nil slices correctly
func TestNilSlice_Size(t *testing.T) {
var nilSlice []int
result := Size(nilSlice)
assert.Equal(t, 0, result, "Size should return 0 for nil slice")
}
// TestNilSlice_MonadPartition verifies that MonadPartition handles nil slices correctly
func TestNilSlice_MonadPartition(t *testing.T) {
	s := []int(nil)
	pair := MonadPartition(s, func(v int) bool { return v > 0 })
	l, r := P.Head(pair), P.Tail(pair)
	assert.NotNil(t, l, "MonadPartition left should return non-nil slice")
	assert.NotNil(t, r, "MonadPartition right should return non-nil slice")
	assert.Len(t, l, 0, "MonadPartition left should be empty for nil input")
	assert.Len(t, r, 0, "MonadPartition right should be empty for nil input")
}
// TestNilSlice_Partition verifies that Partition handles nil slices correctly
func TestNilSlice_Partition(t *testing.T) {
	s := []int(nil)
	pair := Partition(func(v int) bool { return v > 0 })(s)
	l, r := P.Head(pair), P.Tail(pair)
	assert.NotNil(t, l, "Partition left should return non-nil slice")
	assert.NotNil(t, r, "Partition right should return non-nil slice")
	assert.Len(t, l, 0, "Partition left should be empty for nil input")
	assert.Len(t, r, 0, "Partition right should be empty for nil input")
}
// TestNilSlice_IsNil verifies that IsNil handles nil slices correctly
func TestNilSlice_IsNil(t *testing.T) {
	assert.True(t, IsNil([]int(nil)), "IsNil should return true for nil slice")
	assert.False(t, IsNil([]int{}), "IsNil should return false for non-nil empty slice")
}
// TestNilSlice_IsNonNil verifies that IsNonNil handles nil slices correctly
func TestNilSlice_IsNonNil(t *testing.T) {
	assert.False(t, IsNonNil([]int(nil)), "IsNonNil should return false for nil slice")
	assert.True(t, IsNonNil([]int{}), "IsNonNil should return true for non-nil empty slice")
}
// TestNilSlice_Copy verifies that Copy handles nil slices correctly
func TestNilSlice_Copy(t *testing.T) {
	s := []int(nil)
	res := Copy(s)
	assert.NotNil(t, res, "Copy should return non-nil slice")
	assert.Len(t, res, 0, "Copy should return empty slice for nil input")
}
// TestNilSlice_FoldMap verifies that FoldMap handles nil slices correctly
func TestNilSlice_FoldMap(t *testing.T) {
var nilSlice []int
monoid := S.Monoid
foldMap := FoldMap[int](monoid)(func(v int) string {
return fmt.Sprintf("%d", v)
})
result := foldMap(nilSlice)
assert.Equal(t, "", result, "FoldMap should return empty value for nil slice")
}
// TestNilSlice_FoldMapWithIndex verifies that FoldMapWithIndex handles nil slices correctly
func TestNilSlice_FoldMapWithIndex(t *testing.T) {
var nilSlice []int
monoid := S.Monoid
foldMap := FoldMapWithIndex[int](monoid)(func(i int, v int) string {
return fmt.Sprintf("%d:%d", i, v)
})
result := foldMap(nilSlice)
assert.Equal(t, "", result, "FoldMapWithIndex should return empty value for nil slice")
}
// TestNilSlice_Fold verifies that Fold handles nil slices correctly
func TestNilSlice_Fold(t *testing.T) {
var nilSlice []string
monoid := S.Monoid
fold := Fold[string](monoid)
result := fold(nilSlice)
assert.Equal(t, "", result, "Fold should return empty value for nil slice")
}
// TestNilSlice_Concat verifies that Concat handles nil slices correctly
func TestNilSlice_Concat(t *testing.T) {
var nilSlice []int
nonNilSlice := []int{1, 2, 3}
// nil concat non-nil
concat1 := Concat(nonNilSlice)
result1 := concat1(nilSlice)
assert.Equal(t, nonNilSlice, result1, "nil concat non-nil should return non-nil slice")
// non-nil concat nil
concat2 := Concat(nilSlice)
result2 := concat2(nonNilSlice)
assert.Equal(t, nonNilSlice, result2, "non-nil concat nil should return non-nil slice")
// nil concat nil
concat3 := Concat(nilSlice)
result3 := concat3(nilSlice)
assert.Nil(t, result3, "nil concat nil should return nil")
}
// TestNilSlice_MonadFlap verifies that MonadFlap handles nil slices correctly
func TestNilSlice_MonadFlap(t *testing.T) {
	var fs []func(int) string
	res := MonadFlap(fs, 42)
	assert.NotNil(t, res, "MonadFlap should return non-nil slice")
	assert.Len(t, res, 0, "MonadFlap should return empty slice for nil input")
}
// TestNilSlice_Flap verifies that Flap handles nil slices correctly
func TestNilSlice_Flap(t *testing.T) {
	var fs []func(int) string
	res := Flap[string, int](42)(fs)
	assert.NotNil(t, res, "Flap should return non-nil slice")
	assert.Len(t, res, 0, "Flap should return empty slice for nil input")
}
// TestNilSlice_Reverse verifies that Reverse handles nil slices correctly
func TestNilSlice_Reverse(t *testing.T) {
var nilSlice []int
result := Reverse(nilSlice)
assert.Nil(t, result, "Reverse should return nil for nil slice")
}
// TestNilSlice_Extend verifies that Extend handles nil slices correctly
func TestNilSlice_Extend(t *testing.T) {
	s := []int(nil)
	res := Extend(func(as []int) string { return fmt.Sprintf("%v", as) })(s)
	assert.NotNil(t, res, "Extend should return non-nil slice")
	assert.Len(t, res, 0, "Extend should return empty slice for nil input")
}
// TestNilSlice_Empty verifies that Empty creates an empty non-nil slice
func TestNilSlice_Empty(t *testing.T) {
	res := Empty[int]()
	assert.NotNil(t, res, "Empty should return non-nil slice")
	assert.Len(t, res, 0, "Empty should return empty slice")
	assert.False(t, IsNil(res), "Empty should not return nil slice")
}
// TestNilSlice_Zero verifies that Zero creates an empty non-nil slice
func TestNilSlice_Zero(t *testing.T) {
	res := Zero[int]()
	assert.NotNil(t, res, "Zero should return non-nil slice")
	assert.Len(t, res, 0, "Zero should return empty slice")
	assert.False(t, IsNil(res), "Zero should not return nil slice")
}
// TestNilSlice_ConstNil verifies that ConstNil returns a nil slice
func TestNilSlice_ConstNil(t *testing.T) {
	res := ConstNil[int]()
	assert.Nil(t, res, "ConstNil should return nil slice")
	assert.True(t, IsNil(res), "ConstNil should return nil slice")
}
// TestNilSlice_Of verifies that Of creates a proper singleton slice
func TestNilSlice_Of(t *testing.T) {
	res := Of(42)
	assert.NotNil(t, res, "Of should return non-nil slice")
	assert.Len(t, res, 1, "Of should create slice with one element")
	assert.Equal(t, 42, res[0], "Of should set value correctly")
}

130
v2/context/reader/reader.go Normal file
View File

@@ -0,0 +1,130 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package reader provides a specialization of the Reader monad for [context.Context].
//
// This package offers a context-aware Reader monad that simplifies working with
// Go's [context.Context] in a functional programming style. It eliminates the need
// to explicitly thread context through function calls while maintaining type safety
// and composability.
//
// # Core Concept
//
// The Reader monad represents computations that depend on a shared environment.
// In this package, that environment is fixed to [context.Context], making it
// particularly useful for:
//
// - Request-scoped data propagation
// - Cancellation and timeout handling
// - Dependency injection via context values
// - Avoiding explicit context parameter threading
//
// # Type Definitions
//
// - Reader[A]: A computation that depends on context.Context and produces A
// - Kleisli[A, B]: A function from A to Reader[B] for composing computations
// - Operator[A, B]: A transformation from Reader[A] to Reader[B]
//
// # Usage Pattern
//
// Instead of passing context explicitly through every function:
//
// func processUser(ctx context.Context, userID string) (User, error) {
// user := fetchUser(ctx, userID)
// profile := fetchProfile(ctx, user.ProfileID)
// return enrichUser(ctx, user, profile), nil
// }
//
// You can use Reader to compose context-dependent operations:
//
// fetchUser := func(userID string) Reader[User] {
// return func(ctx context.Context) User {
// // Use ctx for database access, cancellation, etc.
// return queryDatabase(ctx, userID)
// }
// }
//
processUser := func(userID string) Reader[User] {
	return F.Pipe1(
		fetchUser(userID),
		reader.Chain(func(user User) Reader[User] {
			// user stays in scope here, so the final Map can combine it with the profile
			return F.Pipe1(
				fetchProfile(user.ProfileID),
				reader.Map(func(profile Profile) User {
					return enrichUser(user, profile)
				}),
			)
		}),
	)
}
//
// // Execute with context
// ctx := context.Background()
// user := processUser("user123")(ctx)
//
// # Integration with Standard Library
//
// This package works seamlessly with Go's standard [context] package:
//
// - Context cancellation and deadlines are preserved
// - Context values can be accessed within Reader computations
// - Readers can be composed with context-aware libraries
//
// # Relationship to Other Packages
//
// This package is a specialization of [github.com/IBM/fp-go/v2/reader] where
// the environment type R is fixed to [context.Context]. For more general
// Reader operations, see the base reader package.
//
// For combining Reader with other monads:
// - [github.com/IBM/fp-go/v2/context/readerio]: Reader + IO effects
// - [github.com/IBM/fp-go/v2/readeroption]: Reader + Option
// - [github.com/IBM/fp-go/v2/readerresult]: Reader + Result (Either)
//
// # Example: HTTP Request Handler
//
// type RequestContext struct {
// UserID string
// RequestID string
// }
//
// // Extract request context from context.Context
// getRequestContext := func(ctx context.Context) RequestContext {
// return RequestContext{
// UserID: ctx.Value("userID").(string),
// RequestID: ctx.Value("requestID").(string),
// }
// }
//
// // A Reader that logs with request context
// logInfo := func(message string) Reader[function.Void] {
// return func(ctx context.Context) function.Void {
// reqCtx := getRequestContext(ctx)
// log.Printf("[%s] User %s: %s", reqCtx.RequestID, reqCtx.UserID, message)
// return function.VOID
// }
// }
//
// // Compose operations
// handleRequest := func(data string) Reader[Response] {
// return F.Pipe2(
// logInfo("Processing request"),
// reader.Chain(func(_ function.Void) Reader[Result] {
// return processData(data)
// }),
// reader.Map(func(result Result) Response {
// return Response{Data: result}
// }),
// )
// }
package reader

142
v2/context/reader/types.go Normal file
View File

@@ -0,0 +1,142 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reader
import (
"context"
R "github.com/IBM/fp-go/v2/reader"
)
type (
// Reader represents a computation that depends on a [context.Context] and produces a value of type A.
//
// This is a specialization of the generic Reader monad where the environment type is fixed
// to [context.Context]. This is particularly useful for Go applications that need to thread
// context through computations for cancellation, deadlines, and request-scoped values.
//
// Type Parameters:
// - A: The result type produced by the computation
//
// Reader[A] is equivalent to func(context.Context) A
//
// The Reader monad enables:
// - Dependency injection using context values
// - Cancellation and timeout handling
// - Request-scoped data propagation
// - Avoiding explicit context parameter threading
//
// Example:
//
// // A Reader that extracts a user ID from context
// getUserID := func(ctx context.Context) string {
// if userID, ok := ctx.Value("userID").(string); ok {
// return userID
// }
// return "anonymous"
// }
//
// // A Reader that checks if context is cancelled
// isCancelled := func(ctx context.Context) bool {
// select {
// case <-ctx.Done():
// return true
// default:
// return false
// }
// }
//
// // Use the readers with a context
// ctx := context.WithValue(context.Background(), "userID", "user123")
// userID := getUserID(ctx) // "user123"
// cancelled := isCancelled(ctx) // false
Reader[A any] = R.Reader[context.Context, A]
// Kleisli represents a Kleisli arrow for the context-based Reader monad.
//
// It's a function from A to Reader[B], used for composing Reader computations
// that all depend on the same [context.Context].
//
// Type Parameters:
// - A: The input type
// - B: The output type wrapped in Reader
//
// Kleisli[A, B] is equivalent to func(A) func(context.Context) B
//
// Kleisli arrows are fundamental for monadic composition, allowing you to chain
// operations that depend on context without explicitly passing the context through
// each function call.
//
// Example:
//
// // A Kleisli arrow that creates a greeting Reader from a name
// greet := func(name string) Reader[string] {
// return func(ctx context.Context) string {
// if deadline, ok := ctx.Deadline(); ok {
// return fmt.Sprintf("Hello %s (deadline: %v)", name, deadline)
// }
// return fmt.Sprintf("Hello %s", name)
// }
// }
//
// // Use the Kleisli arrow
// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
// defer cancel()
// greeting := greet("Alice")(ctx) // "Hello Alice (deadline: ...)"
Kleisli[A, B any] = R.Reader[A, Reader[B]]
// Operator represents a transformation from one Reader to another.
//
// It takes a Reader[A] and produces a Reader[B], where both readers depend on
// the same [context.Context]. This type is commonly used for operations like
// Map, Chain, and other transformations that convert readers while preserving
// the context dependency.
//
// Type Parameters:
// - A: The input Reader's result type
// - B: The output Reader's result type
//
// Operator[A, B] is equivalent to func(Reader[A]) func(context.Context) B
//
// Operators enable building pipelines of context-dependent computations where
// each step can transform the result of the previous computation while maintaining
// access to the shared context.
//
// Example:
//
// // An operator that transforms int readers to string readers
// intToString := func(r Reader[int]) Reader[string] {
// return func(ctx context.Context) string {
// value := r(ctx)
// return strconv.Itoa(value)
// }
// }
//
// // A Reader that extracts a timeout value from context
// getTimeout := func(ctx context.Context) int {
// if deadline, ok := ctx.Deadline(); ok {
// return int(time.Until(deadline).Seconds())
// }
// return 0
// }
//
// // Transform the Reader
// getTimeoutStr := intToString(getTimeout)
// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
// defer cancel()
// result := getTimeoutStr(ctx) // "30" (approximately)
Operator[A, B any] = Kleisli[Reader[A], B]
)

View File

@@ -1,6 +1,7 @@
package readerio
import (
"github.com/IBM/fp-go/v2/function"
RIO "github.com/IBM/fp-go/v2/readerio"
)
@@ -73,3 +74,117 @@ func Bracket[
) ReaderIO[B] {
return RIO.Bracket(acquire, use, release)
}
// WithResource creates a higher-order function that manages a resource lifecycle for any operation.
// It returns a Kleisli arrow that takes a use function and automatically handles resource
// acquisition and cleanup using the bracket pattern.
//
// This is a more composable alternative to Bracket, allowing you to define resource management
// once and reuse it with different use functions. The resource is acquired when the returned
// Kleisli arrow is invoked, used by the provided function, and then released regardless of
// success or failure.
//
// Type Parameters:
// - A: The type of the resource to be managed
// - B: The type of the result produced by the use function
// - ANY: The type returned by the release function (typically ignored)
//
// Parameters:
// - onCreate: A ReaderIO that acquires/creates the resource
// - onRelease: A Kleisli arrow that releases/cleans up the resource
//
// Returns:
// - A Kleisli arrow that takes a use function and returns a ReaderIO managing the full lifecycle
//
// Example with database connection:
//
// // Define resource management once
// withDB := WithResource(
// // Acquire connection
// func(ctx context.Context) IO[*sql.DB] {
// return func() *sql.DB {
// db, _ := sql.Open("postgres", "connection-string")
// return db
// }
// },
// // Release connection
// func(db *sql.DB) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// db.Close()
// return nil
// }
// }
// },
// )
//
// // Reuse with different operations
// queryUsers := withDB(func(db *sql.DB) ReaderIO[[]User] {
// return func(ctx context.Context) IO[[]User] {
// return func() []User {
// // Query users from db
// return users
// }
// }
// })
//
// insertUser := withDB(func(db *sql.DB) ReaderIO[int64] {
// return func(ctx context.Context) IO[int64] {
// return func() int64 {
// // Insert user into db
// return userID
// }
// }
// })
//
// Example with file handling:
//
// withFile := WithResource(
// func(ctx context.Context) IO[*os.File] {
// return func() *os.File {
// f, _ := os.Open("data.txt")
// return f
// }
// },
// func(f *os.File) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// f.Close()
// return nil
// }
// }
// },
// )
//
// // Use for reading
// readContent := withFile(func(f *os.File) ReaderIO[string] {
// return func(ctx context.Context) IO[string] {
// return func() string {
// data, _ := io.ReadAll(f)
// return string(data)
// }
// }
// })
//
// // Use for getting file info
// getSize := withFile(func(f *os.File) ReaderIO[int64] {
// return func(ctx context.Context) IO[int64] {
// return func() int64 {
// info, _ := f.Stat()
// return info.Size()
// }
// }
// })
//
// Use Cases:
// - Database connections: Acquire connection, execute queries, close connection
// - File handles: Open file, read/write, close file
// - Network connections: Establish connection, transfer data, close connection
// - Locks: Acquire lock, perform critical section, release lock
// - Temporary resources: Create temp file/directory, use it, clean up
//
//go:inline
func WithResource[A, B, ANY any](
onCreate ReaderIO[A], onRelease Kleisli[A, ANY]) Kleisli[Kleisli[A, B], B] {
return function.Bind13of3(Bracket[A, B, ANY])(onCreate, function.Ignore2of2[B](onRelease))
}

View File

@@ -0,0 +1,454 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerio
import (
"context"
"errors"
"testing"
"github.com/IBM/fp-go/v2/io"
"github.com/stretchr/testify/assert"
)
// mockResource simulates a resource that tracks its lifecycle
type mockResource struct {
id int
acquired bool
released bool
used bool
}
// TestBracket_Success verifies the full acquire -> use -> release lifecycle of
// Bracket when the use phase completes normally: all three phases must run and
// the value produced by the use phase must be returned unchanged.
func TestBracket_Success(t *testing.T) {
	res := &mockResource{id: 1}

	// Phase 1: acquisition flags the resource as acquired.
	acquire := func(_ context.Context) io.IO[*mockResource] {
		return func() *mockResource {
			res.acquired = true
			return res
		}
	}

	// Phase 2: the use function flags the resource as used and yields a value.
	use := func(r *mockResource) ReaderIO[string] {
		return func(_ context.Context) io.IO[string] {
			return func() string {
				r.used = true
				return "success"
			}
		}
	}

	// Phase 3: release flags the resource as released.
	release := func(r *mockResource, _ string) ReaderIO[any] {
		return func(_ context.Context) io.IO[any] {
			return func() any {
				r.released = true
				return nil
			}
		}
	}

	// Run the bracketed computation against a background context.
	result := Bracket(acquire, use, release)(context.Background())()

	// Every lifecycle phase must have fired, and the use result must survive.
	assert.True(t, res.acquired, "Resource should be acquired")
	assert.True(t, res.used, "Resource should be used")
	assert.True(t, res.released, "Resource should be released")
	assert.Equal(t, "success", result)
}
// TestBracket_MultipleResources tests managing multiple resources by nesting
// one Bracket inside the use phase of another. The outer bracket owns
// resource1; the inner bracket owns resource2. Both resources must go through
// their full acquire/use/release lifecycle.
func TestBracket_MultipleResources(t *testing.T) {
	resource1 := &mockResource{id: 1}
	resource2 := &mockResource{id: 2}
	// Acquire the first (outer) resource.
	acquire1 := func(ctx context.Context) io.IO[*mockResource] {
		return func() *mockResource {
			resource1.acquired = true
			return resource1
		}
	}
	// Using resource1 also acquires resource2 as a side effect and hands it on.
	use1 := func(r1 *mockResource) ReaderIO[*mockResource] {
		return func(ctx context.Context) io.IO[*mockResource] {
			return func() *mockResource {
				r1.used = true
				resource2.acquired = true
				return resource2
			}
		}
	}
	// Release of the outer resource; the string result of the use phase is ignored.
	release1 := func(r1 *mockResource, result string) ReaderIO[any] {
		return func(ctx context.Context) io.IO[any] {
			return func() any {
				r1.released = true
				return nil
			}
		}
	}
	// Nested bracket for second resource
	use2 := func(r2 *mockResource) ReaderIO[string] {
		return func(ctx context.Context) io.IO[string] {
			return func() string {
				r2.used = true
				return "both used"
			}
		}
	}
	release2 := func(r2 *mockResource, result string) ReaderIO[any] {
		return func(ctx context.Context) io.IO[any] {
			return func() any {
				r2.released = true
				return nil
			}
		}
	}
	// Compose brackets: the outer use runs use1 and then delegates to an inner
	// Bracket that manages resource2.
	// NOTE(review): use1's IO is executed eagerly here, when the outer use's
	// ReaderIO is applied to ctx — i.e. before the inner bracket's acquire phase
	// runs; the inner acquire merely returns the already-obtained r2.
	operation := Bracket(acquire1, func(r1 *mockResource) ReaderIO[string] {
		return func(ctx context.Context) io.IO[string] {
			r2 := use1(r1)(ctx)()
			return Bracket(
				func(ctx context.Context) io.IO[*mockResource] {
					return func() *mockResource { return r2 }
				},
				use2,
				release2,
			)(ctx)
		}
	}, release1)
	result := operation(context.Background())()
	// Both resources must have completed their full lifecycle.
	assert.True(t, resource1.acquired)
	assert.True(t, resource1.used)
	assert.True(t, resource1.released)
	assert.True(t, resource2.acquired)
	assert.True(t, resource2.used)
	assert.True(t, resource2.released)
	assert.Equal(t, "both used", result)
}
// TestWithResource_Success verifies that WithResource acquires the resource,
// runs the supplied use function, releases the resource, and returns the
// value produced by the use function.
func TestWithResource_Success(t *testing.T) {
	res := &mockResource{id: 1}

	acquire := func(_ context.Context) io.IO[*mockResource] {
		return func() *mockResource {
			res.acquired = true
			return res
		}
	}
	release := func(r *mockResource) ReaderIO[any] {
		return func(_ context.Context) io.IO[any] {
			return func() any {
				r.released = true
				return nil
			}
		}
	}

	// Bind the lifecycle once, then supply the use function and run it.
	manage := WithResource[*mockResource, string, any](acquire, release)
	result := manage(func(r *mockResource) ReaderIO[string] {
		return func(_ context.Context) io.IO[string] {
			return func() string {
				r.used = true
				return "result"
			}
		}
	})(context.Background())()

	assert.True(t, res.acquired)
	assert.True(t, res.used)
	assert.True(t, res.released)
	assert.Equal(t, "result", result)
}
// TestWithResource_Reusability verifies that a single WithResource binding can
// back multiple independent operations, acquiring a fresh resource for each
// invocation.
func TestWithResource_Reusability(t *testing.T) {
	acquisitions := 0

	manage := WithResource[*mockResource, int, any](
		func(_ context.Context) io.IO[*mockResource] {
			return func() *mockResource {
				acquisitions++
				return &mockResource{id: acquisitions, acquired: true}
			}
		},
		func(r *mockResource) ReaderIO[any] {
			return func(_ context.Context) io.IO[any] {
				return func() any {
					r.released = true
					return nil
				}
			}
		},
	)

	// Helper producing a use function that scales the resource id by factor.
	times := func(factor int) func(*mockResource) ReaderIO[int] {
		return func(r *mockResource) ReaderIO[int] {
			return func(_ context.Context) io.IO[int] {
				return func() int {
					r.used = true
					return r.id * factor
				}
			}
		}
	}

	// First run: resource #1, doubled.
	assert.Equal(t, 2, manage(times(2))(context.Background())())
	assert.Equal(t, 1, acquisitions)

	// Second run: a fresh resource #2, tripled.
	assert.Equal(t, 6, manage(times(3))(context.Background())())
	assert.Equal(t, 2, acquisitions)
}
// TestWithResource_DifferentResultTypes tests WithResource with different
// result types (int and string). Because WithResource's result type B is a
// type parameter, a separate binding is created per result type.
// NOTE(review): both scenarios share the same mockResource instance; its flags
// are reset manually between them, so the sub-scenarios are order-dependent.
func TestWithResource_DifferentResultTypes(t *testing.T) {
	resource := &mockResource{id: 42}
	// Binding whose use functions produce int results.
	withResourceInt := WithResource[*mockResource, int, any](
		func(ctx context.Context) io.IO[*mockResource] {
			return func() *mockResource {
				resource.acquired = true
				return resource
			}
		},
		func(r *mockResource) ReaderIO[any] {
			return func(ctx context.Context) io.IO[any] {
				return func() any {
					r.released = true
					return nil
				}
			}
		},
	)
	// Operation returning int
	opInt := withResourceInt(func(r *mockResource) ReaderIO[int] {
		return func(ctx context.Context) io.IO[int] {
			return func() int {
				return r.id
			}
		}
	})
	resultInt := opInt(context.Background())()
	assert.Equal(t, 42, resultInt)
	// Reset resource state
	resource.acquired = false
	resource.released = false
	// Create new WithResource for string type
	withResourceString := WithResource[*mockResource, string, any](
		func(ctx context.Context) io.IO[*mockResource] {
			return func() *mockResource {
				resource.acquired = true
				return resource
			}
		},
		func(r *mockResource) ReaderIO[any] {
			return func(ctx context.Context) io.IO[any] {
				return func() any {
					r.released = true
					return nil
				}
			}
		},
	)
	// Operation returning string
	opString := withResourceString(func(r *mockResource) ReaderIO[string] {
		return func(ctx context.Context) io.IO[string] {
			return func() string {
				return "value"
			}
		}
	})
	resultString := opString(context.Background())()
	assert.Equal(t, "value", resultString)
	// Only the release flag is re-checked after the second run; acquisition is
	// implied by the release having fired.
	assert.True(t, resource.released)
}
// TestWithResource_ContextPropagation verifies that the context supplied to
// the composed operation — including any values stored in it — reaches the
// acquire phase of WithResource.
func TestWithResource_ContextPropagation(t *testing.T) {
	type contextKey string
	const key contextKey = "test-key"

	// Acquisition reads the "resource" (a string) straight out of the context.
	manage := WithResource[string, string, any](
		func(ctx context.Context) io.IO[string] {
			return func() string {
				if value := ctx.Value(key); value != nil {
					return value.(string)
				}
				return "no-value"
			}
		},
		func(string) ReaderIO[any] {
			return func(_ context.Context) io.IO[any] {
				return func() any {
					return nil
				}
			}
		},
	)

	operation := manage(func(r string) ReaderIO[string] {
		return func(_ context.Context) io.IO[string] {
			return func() string {
				return r + "-processed"
			}
		}
	})

	ctx := context.WithValue(context.Background(), key, "test-value")
	assert.Equal(t, "test-value-processed", operation(ctx)())
}
// TestWithResource_ErrorInRelease tests behavior when the release function
// returns an error value: the use phase's result must still be returned and
// all lifecycle flags must still be set.
// NOTE(review): the ANY type parameter is instantiated with error here; the
// bracket appears to discard the release result entirely — confirm against
// Bracket's documented semantics.
func TestWithResource_ErrorInRelease(t *testing.T) {
	resource := &mockResource{id: 1}
	releaseError := errors.New("release failed")
	withResource := WithResource[*mockResource, string, error](
		func(ctx context.Context) io.IO[*mockResource] {
			return func() *mockResource {
				resource.acquired = true
				return resource
			}
		},
		// Release marks the resource and reports an error as its result.
		func(r *mockResource) ReaderIO[error] {
			return func(ctx context.Context) io.IO[error] {
				return func() error {
					r.released = true
					return releaseError
				}
			}
		},
	)
	operation := withResource(func(r *mockResource) ReaderIO[string] {
		return func(ctx context.Context) io.IO[string] {
			return func() string {
				r.used = true
				return "success"
			}
		}
	})
	result := operation(context.Background())()
	// Operation should succeed even if release returns error
	assert.Equal(t, "success", result)
	assert.True(t, resource.acquired)
	assert.True(t, resource.used)
	assert.True(t, resource.released)
}
// BenchmarkBracket benchmarks the Bracket function
func BenchmarkBracket(b *testing.B) {
acquire := func(ctx context.Context) io.IO[int] {
return func() int {
return 42
}
}
use := func(n int) ReaderIO[int] {
return func(ctx context.Context) io.IO[int] {
return func() int {
return n * 2
}
}
}
release := func(n int, result int) ReaderIO[any] {
return func(ctx context.Context) io.IO[any] {
return func() any {
return nil
}
}
}
operation := Bracket(acquire, use, release)
ctx := context.Background()
b.ResetTimer()
for i := 0; i < b.N; i++ {
operation(ctx)()
}
}
// BenchmarkWithResource measures the per-call overhead of the WithResource
// combinator with trivial acquire, use and release phases.
func BenchmarkWithResource(b *testing.B) {
	op := WithResource[int, int, any](
		func(_ context.Context) io.IO[int] {
			return func() int { return 42 }
		},
		func(int) ReaderIO[any] {
			return func(_ context.Context) io.IO[any] {
				return func() any { return nil }
			}
		},
	)(func(n int) ReaderIO[int] {
		return func(_ context.Context) io.IO[int] {
			return func() int { return n * 2 }
		}
	})
	ctx := context.Background()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		op(ctx)()
	}
}

View File

@@ -19,6 +19,9 @@ import (
"context"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/pair"
RIO "github.com/IBM/fp-go/v2/readerio"
)
// Promap is the profunctor map operation that transforms both the input and output of a context-based ReaderIO.
@@ -33,21 +36,24 @@ import (
// The function f returns both a new context and a CancelFunc that should be called to release resources.
//
// Type Parameters:
// - R: The input environment type that f transforms into context.Context
// - A: The original result type produced by the ReaderIO
// - B: The new output result type
//
// Parameters:
// - f: Function to transform the input context (contravariant)
// - f: Function to transform the input environment R into context.Context (contravariant)
// - g: Function to transform the output value from A to B (covariant)
//
// Returns:
// - An Operator that takes a ReaderIO[A] and returns a ReaderIO[B]
// - A Kleisli arrow that takes a ReaderIO[A] and returns a function from R to B
//
// Note: When R is context.Context, this simplifies to an Operator[A, B]
//
//go:inline
func Promap[A, B any](f func(context.Context) (context.Context, context.CancelFunc), g func(A) B) Operator[A, B] {
func Promap[R, A, B any](f pair.Kleisli[context.CancelFunc, R, context.Context], g func(A) B) RIO.Kleisli[R, ReaderIO[A], B] {
return function.Flow2(
Local[A](f),
Map(g),
RIO.Map[R](g),
)
}
@@ -61,14 +67,87 @@ func Promap[A, B any](f func(context.Context) (context.Context, context.CancelFu
//
// Type Parameters:
// - A: The result type (unchanged)
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: Function to transform the context, returning a new context and CancelFunc
// - f: Function to transform the input environment R into context.Context, returning a new context and CancelFunc
//
// Returns:
// - An Operator that takes a ReaderIO[A] and returns a ReaderIO[A]
// - A Kleisli arrow that takes a ReaderIO[A] and returns a function from R to A
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
//
//go:inline
func Contramap[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
func Contramap[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RIO.Kleisli[R, ReaderIO[A], A] {
return Local[A](f)
}
// LocalIOK transforms the context using an IO effect before passing it to a ReaderIO computation.
//
// This is similar to Local, but the context transformation itself is wrapped in an IO effect,
// allowing for side-effectful context transformations. The transformation function receives
// the current context and returns an IO effect that produces a new context along with a
// cancel function. The cancel function is automatically called when the computation completes
// (via defer), ensuring proper cleanup of resources.
//
// This is useful for:
//   - Context transformations that require side effects (e.g., loading configuration)
//   - Lazy initialization of context values
//   - Context transformations that may fail or need to perform I/O
//   - Composing effectful context setup with computations
//
// Type Parameters:
//   - A: The value type of the ReaderIO
//
// Parameters:
//   - f: An IO Kleisli arrow that transforms the context with side effects
//
// Returns:
//   - An Operator that runs the computation with the effectfully transformed context
//
// Example:
//
//	import (
//		"context"
//		G "github.com/IBM/fp-go/v2/io"
//		F "github.com/IBM/fp-go/v2/function"
//	)
//
//	// Context transformation with side effects (e.g., loading config)
//	loadConfig := func(ctx context.Context) G.IO[ContextCancel] {
//		return func() ContextCancel {
//			// Simulate loading configuration
//			config := loadConfigFromFile()
//			newCtx := context.WithValue(ctx, "config", config)
//			return pair.MakePair[context.CancelFunc](func() {}, newCtx)
//		}
//	}
//
//	getValue := readerio.FromReader(func(ctx context.Context) string {
//		if cfg := ctx.Value("config"); cfg != nil {
//			return cfg.(string)
//		}
//		return "default"
//	})
//
//	result := F.Pipe1(
//		getValue,
//		readerio.LocalIOK[string](loadConfig),
//	)
//	value := result(t.Context())() // Loads config and uses it
//
// Comparison with Local:
//   - Local: Takes a pure function that transforms the context
//   - LocalIOK: Takes an IO effect that transforms the context, allowing side effects
func LocalIOK[A any](f io.Kleisli[context.Context, ContextCancel]) Operator[A, A] {
	return func(r ReaderIO[A]) ReaderIO[A] {
		return func(ctx context.Context) IO[A] {
			// Build the IO that will produce the transformed context; note that
			// f itself runs here but the effect p is only executed when the
			// returned IO runs, keeping the transformation lazy.
			p := f(ctx)
			return func() A {
				// ContextCancel is Pair[CancelFunc, Context]: cancel first, context second.
				otherCancel, otherCtx := pair.Unpack(p())
				// Release any resources tied to the derived context once the
				// wrapped computation has finished (even on panic).
				defer otherCancel()
				return r(otherCtx)()
			}
		}
	}
}

View File

@@ -21,6 +21,7 @@ import (
"testing"
"time"
"github.com/IBM/fp-go/v2/pair"
"github.com/stretchr/testify/assert"
)
@@ -38,9 +39,9 @@ func TestPromapBasic(t *testing.T) {
}
// Transform context and result
addKey := func(ctx context.Context) (context.Context, context.CancelFunc) {
addKey := func(ctx context.Context) ContextCancel {
newCtx := context.WithValue(ctx, "key", 42)
return newCtx, func() {}
return pair.MakePair[context.CancelFunc](func() {}, newCtx)
}
toString := strconv.Itoa
@@ -63,9 +64,9 @@ func TestContramapBasic(t *testing.T) {
}
}
addKey := func(ctx context.Context) (context.Context, context.CancelFunc) {
addKey := func(ctx context.Context) ContextCancel {
newCtx := context.WithValue(ctx, "key", 100)
return newCtx, func() {}
return pair.MakePair[context.CancelFunc](func() {}, newCtx)
}
adapted := Contramap[int](addKey)(getValue)
@@ -85,8 +86,9 @@ func TestLocalBasic(t *testing.T) {
}
}
addTimeout := func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(ctx, time.Second)
addTimeout := func(ctx context.Context) ContextCancel {
newCtx, cancelFct := context.WithTimeout(ctx, time.Second)
return pair.MakePair(cancelFct, newCtx)
}
adapted := Local[bool](addTimeout)(getValue)
@@ -95,3 +97,81 @@ func TestLocalBasic(t *testing.T) {
assert.True(t, result)
})
}
// TestLocalIOKBasic tests basic LocalIOK functionality: effectful context
// transformation, invocation of the cleanup (cancel) function, and
// interoperability with timeout contexts.
func TestLocalIOKBasic(t *testing.T) {
	t.Run("context transformation with IO effect", func(t *testing.T) {
		// Reads a value that only exists after the transformation has run.
		getValue := func(ctx context.Context) IO[string] {
			return func() string {
				if v := ctx.Value("key"); v != nil {
					return v.(string)
				}
				return "default"
			}
		}
		// Context transformation wrapped in IO effect
		addKeyIO := func(ctx context.Context) IO[ContextCancel] {
			return func() ContextCancel {
				// Simulate side effect (e.g., loading config)
				newCtx := context.WithValue(ctx, "key", "loaded-value")
				return pair.MakePair[context.CancelFunc](func() {}, newCtx)
			}
		}
		adapted := LocalIOK[string](addKeyIO)(getValue)
		result := adapted(t.Context())()
		assert.Equal(t, "loaded-value", result)
	})
	t.Run("cleanup function is called", func(t *testing.T) {
		cleanupCalled := false
		getValue := func(ctx context.Context) IO[int] {
			return func() int {
				if v := ctx.Value("value"); v != nil {
					return v.(int)
				}
				return 0
			}
		}
		// The cancel half of the pair records that cleanup ran.
		addValueIO := func(ctx context.Context) IO[ContextCancel] {
			return func() ContextCancel {
				newCtx := context.WithValue(ctx, "value", 42)
				cleanup := context.CancelFunc(func() {
					cleanupCalled = true
				})
				return pair.MakePair(cleanup, newCtx)
			}
		}
		adapted := LocalIOK[int](addValueIO)(getValue)
		result := adapted(t.Context())()
		assert.Equal(t, 42, result)
		// LocalIOK defers the cancel function, so it must have fired by now.
		assert.True(t, cleanupCalled, "cleanup function should be called")
	})
	t.Run("works with timeout context", func(t *testing.T) {
		// The computation only observes whether a deadline is present.
		getValue := func(ctx context.Context) IO[bool] {
			return func() bool {
				_, hasDeadline := ctx.Deadline()
				return hasDeadline
			}
		}
		addTimeoutIO := func(ctx context.Context) IO[ContextCancel] {
			return func() ContextCancel {
				newCtx, cancelFct := context.WithTimeout(ctx, time.Second)
				return pair.MakePair(cancelFct, newCtx)
			}
		}
		adapted := LocalIOK[bool](addTimeoutIO)(getValue)
		result := adapted(t.Context())()
		assert.True(t, result, "context should have deadline")
	})
}

View File

@@ -20,6 +20,7 @@ import (
"time"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/reader"
RIO "github.com/IBM/fp-go/v2/readerio"
)
@@ -633,12 +634,15 @@ func ReadIO[A any](r IO[context.Context]) func(ReaderIO[A]) IO[A] {
//
// Type Parameters:
// - A: The value type of the ReaderIO
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: A function that transforms the context and returns a cancel function
// - f: A function that transforms the input environment R into context.Context and returns a cancel function
//
// Returns:
// - An Operator that runs the computation with the transformed context
// - A Kleisli arrow that runs the computation with the transformed context
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
//
// Example:
//
@@ -648,9 +652,9 @@ func ReadIO[A any](r IO[context.Context]) func(ReaderIO[A]) IO[A] {
// type key int
// const userKey key = 0
//
// addUser := readerio.Local[string](func(ctx context.Context) (context.Context, context.CancelFunc) {
// addUser := readerio.Local[string, context.Context](func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
// newCtx := context.WithValue(ctx, userKey, "Alice")
// return newCtx, func() {} // No-op cancel
// return pair.MakePair(func() {}, newCtx) // No-op cancel
// })
//
// getUser := readerio.FromReader(func(ctx context.Context) string {
@@ -669,19 +673,20 @@ func ReadIO[A any](r IO[context.Context]) func(ReaderIO[A]) IO[A] {
// Timeout Example:
//
// // Add a 5-second timeout to a specific operation
// withTimeout := readerio.Local[Data](func(ctx context.Context) (context.Context, context.CancelFunc) {
// return context.WithTimeout(ctx, 5*time.Second)
// withTimeout := readerio.Local[Data, context.Context](func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
// newCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
// return pair.MakePair(cancel, newCtx)
// })
//
// result := F.Pipe1(
// fetchData,
// withTimeout,
// )
func Local[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
return func(rr ReaderIO[A]) ReaderIO[A] {
return func(ctx context.Context) IO[A] {
func Local[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RIO.Kleisli[R, ReaderIO[A], A] {
return func(rr ReaderIO[A]) RIO.ReaderIO[R, A] {
return func(r R) IO[A] {
return func() A {
otherCtx, otherCancel := f(ctx)
otherCancel, otherCtx := pair.Unpack(f(r))
defer otherCancel()
return rr(otherCtx)()
}
@@ -742,8 +747,9 @@ func Local[A any](f func(context.Context) (context.Context, context.CancelFunc))
// )
// data := result(t.Context())() // Returns Data{Value: "quick"}
func WithTimeout[A any](timeout time.Duration) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithTimeout(ctx, timeout)
return Local[A](func(ctx context.Context) ContextCancel {
newCtx, cancelFct := context.WithTimeout(ctx, timeout)
return pair.MakePair(cancelFct, newCtx)
})
}
@@ -806,8 +812,9 @@ func WithTimeout[A any](timeout time.Duration) Operator[A, A] {
// )
// data := result(parentCtx)() // Will use parent's 1-hour deadline
func WithDeadline[A any](deadline time.Time) Operator[A, A] {
return Local[A](func(ctx context.Context) (context.Context, context.CancelFunc) {
return context.WithDeadline(ctx, deadline)
return Local[A](func(ctx context.Context) ContextCancel {
newCtx, cancelFct := context.WithDeadline(ctx, deadline)
return pair.MakePair(cancelFct, newCtx)
})
}

View File

@@ -23,6 +23,7 @@ import (
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/lazy"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/predicate"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/readerio"
@@ -81,4 +82,15 @@ type (
Predicate[A any] = predicate.Predicate[A]
Void = function.Void
// Pair represents a tuple of two values of types A and B.
// It is used to group two related values together.
Pair[A, B any] = pair.Pair[A, B]
// ContextCancel represents a pair of a cancel function and a context.
// It is used in operations that create new contexts with cancellation capabilities.
//
// The first element is the CancelFunc that should be called to release resources.
// The second element is the new Context that was created.
ContextCancel = Pair[context.CancelFunc, context.Context]
)

View File

@@ -28,6 +28,7 @@ import (
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/logging"
"github.com/IBM/fp-go/v2/option"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/reader"
"github.com/IBM/fp-go/v2/result"
)
@@ -90,132 +91,7 @@ func withLoggingContext(lctx loggingContext) Endomorphism[context.Context] {
return F.Bind2nd(withLoggingContextValue, any(lctx))
}
// LogEntryExitF creates a customizable operator that wraps a ReaderIOResult computation with entry/exit callbacks.
//
// This is a more flexible version of LogEntryExit that allows you to provide custom callbacks for
// entry and exit events. The onEntry callback receives the current context and can return a modified
// context (e.g., with additional logging information). The onExit callback receives the computation
// result and can perform custom logging, metrics collection, or cleanup.
//
// The function uses the bracket pattern to ensure that:
// - The onEntry callback is executed before the computation starts
// - The computation runs with the context returned by onEntry
// - The onExit callback is executed after the computation completes (success or failure)
// - The original result is preserved and returned unchanged
// - Cleanup happens even if the computation fails
//
// Type Parameters:
// - A: The success type of the ReaderIOResult
// - ANY: The return type of the onExit callback (typically any)
//
// Parameters:
// - onEntry: A ReaderIO that receives the current context and returns a (possibly modified) context.
// This is executed before the computation starts. Use this for logging entry, adding context values,
// starting timers, or initialization logic.
// - onExit: A Kleisli function that receives the Result[A] and returns a ReaderIO[ANY].
// This is executed after the computation completes, regardless of success or failure.
// Use this for logging exit, recording metrics, cleanup, or finalization logic.
//
// Returns:
// - An Operator that wraps the ReaderIOResult computation with the custom entry/exit callbacks
//
// Example with custom context modification:
//
// type RequestID string
//
// logOp := LogEntryExitF[User, any](
// func(ctx context.Context) IO[context.Context] {
// return func() context.Context {
// reqID := RequestID(uuid.New().String())
// log.Printf("[%s] Starting operation", reqID)
// return context.WithValue(ctx, "requestID", reqID)
// }
// },
// func(res Result[User]) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// reqID := ctx.Value("requestID").(RequestID)
// return F.Pipe1(
// res,
// result.Fold(
// func(err error) any {
// log.Printf("[%s] Operation failed: %v", reqID, err)
// return nil
// },
// func(_ User) any {
// log.Printf("[%s] Operation succeeded", reqID)
// return nil
// },
// ),
// )
// }
// }
// },
// )
//
// wrapped := logOp(fetchUser(123))
//
// Example with metrics collection:
//
// import "github.com/prometheus/client_golang/prometheus"
//
// metricsOp := LogEntryExitF[Response, any](
// func(ctx context.Context) IO[context.Context] {
// return func() context.Context {
// requestCount.WithLabelValues("api_call", "started").Inc()
// return context.WithValue(ctx, "startTime", time.Now())
// }
// },
// func(res Result[Response]) ReaderIO[any] {
// return func(ctx context.Context) IO[any] {
// return func() any {
// startTime := ctx.Value("startTime").(time.Time)
// duration := time.Since(startTime).Seconds()
//
// return F.Pipe1(
// res,
// result.Fold(
// func(err error) any {
// requestCount.WithLabelValues("api_call", "error").Inc()
// requestDuration.WithLabelValues("api_call", "error").Observe(duration)
// return nil
// },
// func(_ Response) any {
// requestCount.WithLabelValues("api_call", "success").Inc()
// requestDuration.WithLabelValues("api_call", "success").Observe(duration)
// return nil
// },
// ),
// )
// }
// }
// },
// )
//
// Use Cases:
// - Custom context modification: Adding request IDs, trace IDs, or other context values
// - Structured logging: Integration with zap, logrus, or other structured loggers
// - Metrics collection: Recording operation durations, success/failure rates
// - Distributed tracing: OpenTelemetry, Jaeger integration
// - Custom monitoring: Application-specific monitoring and alerting
//
// Note: LogEntryExit is implemented using LogEntryExitF with standard logging and context management.
// Use LogEntryExitF when you need more control over the entry/exit behavior or context modification.
func LogEntryExitF[A, ANY any](
onEntry ReaderIO[context.Context],
onExit readerio.Kleisli[Result[A], ANY],
) Operator[A, A] {
bracket := F.Bind13of3(readerio.Bracket[context.Context, Result[A], ANY])(onEntry, func(newCtx context.Context, res Result[A]) ReaderIO[ANY] {
return readerio.FromIO(onExit(res)(newCtx)) // Get the exit callback for this result
})
return func(src ReaderIOResult[A]) ReaderIOResult[A] {
return bracket(F.Flow2(
src,
FromIOResult,
))
}
}
// noop is a no-op function used as a placeholder context.CancelFunc in the
// logging-disabled branch of onEntry, where no cancellation resources exist to release.
func noop() {}
// onEntry creates a ReaderIO that handles the entry logging for an operation.
// It generates a unique logging ID, captures the start time, and logs the entry message.
@@ -230,15 +106,15 @@ func LogEntryExitF[A, ANY any](
// - A ReaderIO that prepares the context with logging information and logs the entry
func onEntry(
logLevel slog.Level,
cb func(context.Context) *slog.Logger,
cb Reader[context.Context, *slog.Logger],
nameAttr slog.Attr,
) ReaderIO[context.Context] {
) ReaderIO[ContextCancel] {
return func(ctx context.Context) IO[context.Context] {
return func(ctx context.Context) IO[ContextCancel] {
// logger
logger := cb(ctx)
return func() context.Context {
return func() ContextCancel {
// check if the logger is enabled
if logger.Enabled(ctx, logLevel) {
// Generate unique logging ID and capture start time
@@ -258,19 +134,23 @@ func onEntry(
})
withLogger := logging.WithLogger(newLogger)
return withCtx(withLogger(ctx))
return F.Pipe2(
ctx,
withLogger,
pair.Map[context.CancelFunc](withCtx),
)
}
// logging disabled
withCtx := withLoggingContext(loggingContext{
logger: logger,
isEnabled: false,
})
return withCtx(ctx)
return pair.MakePair[context.CancelFunc](noop, withCtx(ctx))
}
}
}
// onExitAny creates a Kleisli function that handles exit logging for an operation.
// onExitVoid creates a Kleisli function that handles exit logging for an operation.
// It logs either success or error based on the Result, including the operation duration.
// Only logs if logging was enabled during entry (checked via loggingContext.isEnabled).
//
@@ -280,33 +160,33 @@ func onEntry(
//
// Returns:
// - A Kleisli function that logs the exit/error and returns nil
func onExitAny(
func onExitVoid(
logLevel slog.Level,
nameAttr slog.Attr,
) readerio.Kleisli[Result[any], any] {
return func(res Result[any]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
) readerio.Kleisli[Result[Void], Void] {
return func(res Result[Void]) ReaderIO[Void] {
return func(ctx context.Context) IO[Void] {
value := getLoggingContext(ctx)
if value.isEnabled {
return func() any {
return func() Void {
// Retrieve logging information from context
durationAttr := slog.Duration("duration", time.Since(value.startTime))
// Log error with ID and duration
onError := func(err error) any {
onError := func(err error) Void {
value.logger.LogAttrs(ctx, logLevel, "[throwing]",
nameAttr,
durationAttr,
slog.Any("error", err))
return nil
return F.VOID
}
// Log success with ID and duration
onSuccess := func(_ any) any {
onSuccess := func(v Void) Void {
value.logger.LogAttrs(ctx, logLevel, "[exiting ]", nameAttr, durationAttr)
return nil
return v
}
return F.Pipe1(
@@ -316,7 +196,7 @@ func onExitAny(
}
}
// nothing to do
return io.Of[any](nil)
return io.Of(F.VOID)
}
}
}
@@ -374,13 +254,21 @@ func LogEntryExitWithCallback[A any](
nameAttr := slog.String("name", name)
return LogEntryExitF(
entry := F.Pipe1(
onEntry(logLevel, cb, nameAttr),
F.Flow2(
result.MapTo[A, any](nil),
onExitAny(logLevel, nameAttr),
),
readerio.LocalIOK[Result[A]],
)
exit := readerio.Tap(F.Flow2(
result.MapTo[A](F.VOID),
onExitVoid(logLevel, nameAttr),
))
return F.Flow2(
exit,
entry,
)
}
// LogEntryExit creates an operator that logs the entry and exit of a ReaderIOResult computation with timing and correlation IDs.

View File

@@ -0,0 +1,415 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package readerioresult
import (
"bytes"
"errors"
"log/slog"
"strings"
"testing"
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/logging"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
// TestTapSLogComprehensive_Success verifies TapSLog logs successful values.
// It installs a buffered slog logger as the package-global logger so that each
// subtest can inspect the emitted log output, and checks that the tapped value
// is both logged (as "value=...") and passed through the pipeline unchanged.
func TestTapSLogComprehensive_Success(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	t.Run("logs integer success value", func(t *testing.T) {
		buf.Reset()
		pipeline := F.Pipe2(
			Of(42),
			TapSLog[int]("Integer value"),
			Map(N.Mul(2)),
		)
		res := pipeline(t.Context())()
		// Verify result is correct: the tap must not alter the value (42*2=84)
		assert.Equal(t, 84, F.Pipe1(res, getOrZero))
		// Verify logging occurred
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Integer value", "Should log the message")
		assert.Contains(t, logOutput, "value=42", "Should log the success value")
		assert.NotContains(t, logOutput, "error", "Should not contain error keyword for success")
	})
	t.Run("logs string success value", func(t *testing.T) {
		buf.Reset()
		pipeline := F.Pipe1(
			Of("hello world"),
			TapSLog[string]("String value"),
		)
		res := pipeline(t.Context())()
		// Verify result is correct
		assert.True(t, F.Pipe1(res, isRight[string]))
		// Verify logging occurred; slog's text handler quotes strings with spaces
		logOutput := buf.String()
		assert.Contains(t, logOutput, "String value")
		assert.Contains(t, logOutput, `value="hello world"`)
	})
	t.Run("logs struct success value", func(t *testing.T) {
		buf.Reset()
		type User struct {
			ID   int
			Name string
		}
		user := User{ID: 123, Name: "Alice"}
		pipeline := F.Pipe1(
			Of(user),
			TapSLog[User]("User struct"),
		)
		res := pipeline(t.Context())()
		// Verify result is correct
		assert.True(t, F.Pipe1(res, isRight[User]))
		// Verify logging occurred with struct fields (default %+v-style rendering)
		logOutput := buf.String()
		assert.Contains(t, logOutput, "User struct")
		assert.Contains(t, logOutput, "ID:123")
		assert.Contains(t, logOutput, "Name:Alice")
	})
	t.Run("logs multiple success values in pipeline", func(t *testing.T) {
		buf.Reset()
		// Two taps in sequence: each should log the value at its point in the pipeline.
		step1 := F.Pipe2(
			Of(10),
			TapSLog[int]("Initial value"),
			Map(N.Mul(2)),
		)
		pipeline := F.Pipe2(
			step1,
			TapSLog[int]("After doubling"),
			Map(N.Add(5)),
		)
		res := pipeline(t.Context())()
		// Verify result is correct: (10*2)+5 = 25
		assert.Equal(t, 25, getOrZero(res))
		// Verify both log entries
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Initial value")
		assert.Contains(t, logOutput, "value=10")
		assert.Contains(t, logOutput, "After doubling")
		assert.Contains(t, logOutput, "value=20")
	})
}
// TestTapSLogComprehensive_Error verifies TapSLog behavior with errors.
// It checks that error Results are logged (with the error message), that the
// error is propagated unchanged through subsequent Map/Chain stages, and that
// every tap along an errored pipeline logs the same error.
func TestTapSLogComprehensive_Error(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	t.Run("logs error values", func(t *testing.T) {
		buf.Reset()
		testErr := errors.New("test error")
		pipeline := F.Pipe2(
			Left[int](testErr),
			TapSLog[int]("Error case"),
			Map(N.Mul(2)),
		)
		res := pipeline(t.Context())()
		// Verify error is preserved (Map does not run on a Left)
		assert.True(t, F.Pipe1(res, isLeft[int]))
		// Verify logging occurred for error
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Error case", "Should log the message")
		assert.Contains(t, logOutput, "error", "Should contain error keyword")
		assert.Contains(t, logOutput, "test error", "Should log the error message")
		assert.NotContains(t, logOutput, "value=", "Should not log 'value=' for errors")
	})
	t.Run("preserves error through pipeline", func(t *testing.T) {
		buf.Reset()
		originalErr := errors.New("original error")
		step1 := F.Pipe2(
			Left[int](originalErr),
			TapSLog[int]("First tap"),
			Map(N.Mul(2)),
		)
		pipeline := F.Pipe2(
			step1,
			TapSLog[int]("Second tap"),
			Map(N.Add(5)),
		)
		res := pipeline(t.Context())()
		// Verify error is preserved
		assert.True(t, isLeft(res))
		// Verify both taps logged the error: same error must appear exactly twice
		logOutput := buf.String()
		errorCount := strings.Count(logOutput, "original error")
		assert.Equal(t, 2, errorCount, "Both TapSLog calls should log the error")
		assert.Contains(t, logOutput, "First tap")
		assert.Contains(t, logOutput, "Second tap")
	})
	t.Run("logs error after successful operation", func(t *testing.T) {
		buf.Reset()
		// Pipeline succeeds at first, then a Chain step fails; the second tap
		// must log the error while the first logged the success value.
		pipeline := F.Pipe3(
			Of(10),
			TapSLog[int]("Before error"),
			Chain(func(n int) ReaderIOResult[int] {
				return Left[int](errors.New("chain error"))
			}),
			TapSLog[int]("After error"),
		)
		res := pipeline(t.Context())()
		// Verify error is present
		assert.True(t, F.Pipe1(res, isLeft[int]))
		// Verify both logs
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Before error")
		assert.Contains(t, logOutput, "value=10")
		assert.Contains(t, logOutput, "After error")
		assert.Contains(t, logOutput, "chain error")
	})
}
// TestTapSLogComprehensive_EdgeCases verifies TapSLog with edge cases:
// zero values, empty strings, nil pointers, and a logger whose level
// suppresses the tap's log records entirely.
func TestTapSLogComprehensive_EdgeCases(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	t.Run("logs zero value", func(t *testing.T) {
		buf.Reset()
		pipeline := F.Pipe1(
			Of(0),
			TapSLog[int]("Zero value"),
		)
		res := pipeline(t.Context())()
		// A success carrying 0 is still a Right; getOrZero returns 0 either way,
		// so the log assertion below is the meaningful check.
		assert.Equal(t, 0, F.Pipe1(res, getOrZero))
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Zero value")
		assert.Contains(t, logOutput, "value=0")
	})
	t.Run("logs empty string", func(t *testing.T) {
		buf.Reset()
		pipeline := F.Pipe1(
			Of(""),
			TapSLog[string]("Empty string"),
		)
		res := pipeline(t.Context())()
		assert.True(t, F.Pipe1(res, isRight[string]))
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Empty string")
		assert.Contains(t, logOutput, `value=""`)
	})
	t.Run("logs nil pointer", func(t *testing.T) {
		buf.Reset()
		type Data struct {
			Value string
		}
		var nilData *Data
		pipeline := F.Pipe1(
			Of(nilData),
			TapSLog[*Data]("Nil pointer"),
		)
		res := pipeline(t.Context())()
		// A nil pointer is a valid success value; it must not become an error.
		assert.True(t, F.Pipe1(res, isRight[*Data]))
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Nil pointer")
		// Nil representation may vary, but should be logged
		assert.NotEmpty(t, logOutput)
	})
	t.Run("respects logger level - disabled", func(t *testing.T) {
		buf.Reset()
		// Create logger that only logs errors
		errorLogger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
			Level: slog.LevelError,
		}))
		// Shadow the outer logger swap for the duration of this subtest only.
		oldLogger := logging.SetLogger(errorLogger)
		defer logging.SetLogger(oldLogger)
		pipeline := F.Pipe1(
			Of(42),
			TapSLog[int]("Should not log"),
		)
		res := pipeline(t.Context())()
		// The value must still flow through even when logging is suppressed.
		assert.Equal(t, 42, F.Pipe1(res, getOrZero))
		// Should have no logs since level is ERROR
		logOutput := buf.String()
		assert.Empty(t, logOutput, "Should not log when level is disabled")
	})
}
// TestTapSLogComprehensive_Integration verifies TapSLog in realistic scenarios:
// multi-stage pipelines mixing Map, Chain-based validation, and several taps,
// covering both the all-success path and error propagation after a failed
// validation step.
func TestTapSLogComprehensive_Integration(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	t.Run("complex pipeline with mixed success and error", func(t *testing.T) {
		buf.Reset()
		// Simulate a data processing pipeline
		validatePositive := func(n int) ReaderIOResult[int] {
			if n > 0 {
				return Of(n)
			}
			return Left[int](errors.New("number must be positive"))
		}
		step1 := F.Pipe3(
			Of(5),
			TapSLog[int]("Input received"),
			Map(N.Mul(2)),
			TapSLog[int]("After multiplication"),
		)
		pipeline := F.Pipe2(
			step1,
			Chain(validatePositive),
			TapSLog[int]("After validation"),
		)
		res := pipeline(t.Context())()
		// 5*2 = 10, positive, so validation passes unchanged.
		assert.Equal(t, 10, getOrZero(res))
		logOutput := buf.String()
		assert.Contains(t, logOutput, "Input received")
		assert.Contains(t, logOutput, "value=5")
		assert.Contains(t, logOutput, "After multiplication")
		assert.Contains(t, logOutput, "value=10")
		assert.Contains(t, logOutput, "After validation")
		assert.Contains(t, logOutput, "value=10")
	})
	t.Run("error propagation with logging", func(t *testing.T) {
		buf.Reset()
		validatePositive := func(n int) ReaderIOResult[int] {
			if n > 0 {
				return Of(n)
			}
			return Left[int](errors.New("number must be positive"))
		}
		// Same pipeline as above but with a negative input: the taps before
		// validation log successes, the tap after validation logs the error.
		step1 := F.Pipe3(
			Of(-5),
			TapSLog[int]("Input received"),
			Map(N.Mul(2)),
			TapSLog[int]("After multiplication"),
		)
		pipeline := F.Pipe2(
			step1,
			Chain(validatePositive),
			TapSLog[int]("After validation"),
		)
		res := pipeline(t.Context())()
		assert.True(t, isLeft(res))
		logOutput := buf.String()
		// First two taps should log success
		assert.Contains(t, logOutput, "Input received")
		assert.Contains(t, logOutput, "value=-5")
		assert.Contains(t, logOutput, "After multiplication")
		assert.Contains(t, logOutput, "value=-10")
		// Last tap should log error
		assert.Contains(t, logOutput, "After validation")
		assert.Contains(t, logOutput, "number must be positive")
	})
}
// Helper functions for tests
// getOrZero extracts the success value from an integer Result, falling back
// to zero when the Result carries an error.
func getOrZero(res Result[int]) int {
	if val, err := result.Unwrap(res); err == nil {
		return val
	}
	return 0
}
// isRight reports whether res is a success; generic wrapper so it can be used
// point-free as the second argument of F.Pipe1 in the tests above.
func isRight[A any](res Result[A]) bool {
	return result.IsRight(res)
}
// isLeft reports whether res is an error; generic wrapper so it can be used
// point-free as the second argument of F.Pipe1 in the tests above.
func isLeft[A any](res Result[A]) bool {
	return result.IsLeft(res)
}

View File

@@ -13,6 +13,7 @@ import (
F "github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/logging"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/result"
S "github.com/IBM/fp-go/v2/string"
"github.com/stretchr/testify/assert"
@@ -53,6 +54,11 @@ func TestLogEntryExitSuccess(t *testing.T) {
assert.Contains(t, logOutput, "TestOperation")
assert.Contains(t, logOutput, "ID=")
assert.Contains(t, logOutput, "duration=")
// Verify entry log appears before exit log
enteringIdx := strings.Index(logOutput, "[entering]")
exitingIdx := strings.Index(logOutput, "[exiting ]")
assert.Greater(t, exitingIdx, enteringIdx, "Exit log should appear after entry log")
}
// TestLogEntryExitError tests error operation logging
@@ -81,6 +87,11 @@ func TestLogEntryExitError(t *testing.T) {
assert.Contains(t, logOutput, "test error")
assert.Contains(t, logOutput, "ID=")
assert.Contains(t, logOutput, "duration=")
// Verify entry log appears before error log
enteringIdx := strings.Index(logOutput, "[entering]")
throwingIdx := strings.Index(logOutput, "[throwing]")
assert.Greater(t, throwingIdx, enteringIdx, "Error log should appear after entry log")
}
// TestLogEntryExitNested tests nested operations with different IDs
@@ -119,6 +130,48 @@ func TestLogEntryExitNested(t *testing.T) {
exitCount := strings.Count(logOutput, "[exiting ]")
assert.Equal(t, 2, enterCount, "Should have 2 entering logs")
assert.Equal(t, 2, exitCount, "Should have 2 exiting logs")
// Verify log ordering: Each operation logs entry before exit
// Note: Due to Chain semantics, OuterOp completes before InnerOp starts
lines := strings.Split(logOutput, "\n")
var logSequence []string
for _, line := range lines {
if strings.Contains(line, "OuterOp") && strings.Contains(line, "[entering]") {
logSequence = append(logSequence, "OuterOp-entering")
} else if strings.Contains(line, "OuterOp") && strings.Contains(line, "[exiting ]") {
logSequence = append(logSequence, "OuterOp-exiting")
} else if strings.Contains(line, "InnerOp") && strings.Contains(line, "[entering]") {
logSequence = append(logSequence, "InnerOp-entering")
} else if strings.Contains(line, "InnerOp") && strings.Contains(line, "[exiting ]") {
logSequence = append(logSequence, "InnerOp-exiting")
}
}
// Verify each operation's entry comes before its exit
assert.Equal(t, 4, len(logSequence), "Should have 4 log entries")
// Find indices
outerEnterIdx := -1
outerExitIdx := -1
innerEnterIdx := -1
innerExitIdx := -1
for i, log := range logSequence {
switch log {
case "OuterOp-entering":
outerEnterIdx = i
case "OuterOp-exiting":
outerExitIdx = i
case "InnerOp-entering":
innerEnterIdx = i
case "InnerOp-exiting":
innerExitIdx = i
}
}
// Verify entry before exit for each operation
assert.Greater(t, outerExitIdx, outerEnterIdx, "OuterOp exit should come after OuterOp entry")
assert.Greater(t, innerExitIdx, innerEnterIdx, "InnerOp exit should come after InnerOp entry")
}
// TestLogEntryExitWithCallback tests custom log level and callback
@@ -172,76 +225,6 @@ func TestLogEntryExitDisabled(t *testing.T) {
assert.Empty(t, logOutput, "Should have no logs when logging is disabled")
}
// TestLogEntryExitF tests custom entry/exit callbacks
func TestLogEntryExitF(t *testing.T) {
var entryCount, exitCount int
onEntry := func(ctx context.Context) IO[context.Context] {
return func() context.Context {
entryCount++
return ctx
}
}
onExit := func(res Result[string]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
return func() any {
exitCount++
return nil
}
}
}
operation := F.Pipe1(
Of("test"),
LogEntryExitF(onEntry, onExit),
)
res := operation(t.Context())()
assert.True(t, result.IsRight(res))
assert.Equal(t, 1, entryCount, "Entry callback should be called once")
assert.Equal(t, 1, exitCount, "Exit callback should be called once")
}
// TestLogEntryExitFWithError tests custom callbacks with error
func TestLogEntryExitFWithError(t *testing.T) {
var entryCount, exitCount int
var capturedError error
onEntry := func(ctx context.Context) IO[context.Context] {
return func() context.Context {
entryCount++
return ctx
}
}
onExit := func(res Result[string]) ReaderIO[any] {
return func(ctx context.Context) IO[any] {
return func() any {
exitCount++
if result.IsLeft(res) {
_, capturedError = result.Unwrap(res)
}
return nil
}
}
}
testErr := errors.New("custom error")
operation := F.Pipe1(
Left[string](testErr),
LogEntryExitF(onEntry, onExit),
)
res := operation(t.Context())()
assert.True(t, result.IsLeft(res))
assert.Equal(t, 1, entryCount, "Entry callback should be called once")
assert.Equal(t, 1, exitCount, "Exit callback should be called once")
assert.Equal(t, testErr, capturedError, "Should capture the error")
}
// TestLoggingIDUniqueness tests that logging IDs are unique
func TestLoggingIDUniqueness(t *testing.T) {
var buf bytes.Buffer
@@ -287,7 +270,8 @@ func TestLogEntryExitWithContextLogger(t *testing.T) {
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(t.Context())
cancelFct, ctx := pair.Unpack(logging.WithLogger(contextLogger)(t.Context()))
defer cancelFct()
operation := F.Pipe1(
Of("context value"),
@@ -546,7 +530,8 @@ func TestTapSLogWithContextLogger(t *testing.T) {
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(t.Context())
cancelFct, ctx := pair.Unpack(logging.WithLogger(contextLogger)(t.Context()))
defer cancelFct()
operation := F.Pipe2(
Of("test value"),
@@ -660,3 +645,138 @@ func TestSLogWithCallbackLogsError(t *testing.T) {
assert.Contains(t, logOutput, "warning error")
assert.Contains(t, logOutput, "level=WARN")
}
// TestTapSLogPreservesResult tests that TapSLog doesn't modify the result:
// a success value continues through subsequent Maps unchanged, and an error
// value (including its identity) is propagated as-is.
func TestTapSLogPreservesResult(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	// Test with success value: 42 tapped, then doubled -> Of(84)
	successOp := F.Pipe2(
		Of(42),
		TapSLog[int]("Success value"),
		Map(N.Mul(2)),
	)
	successRes := successOp(t.Context())()
	assert.Equal(t, result.Of(84), successRes)
	// Test with error value
	testErr := errors.New("test error")
	errorOp := F.Pipe2(
		Left[int](testErr),
		TapSLog[int]("Error value"),
		Map(N.Mul(2)),
	)
	errorRes := errorOp(t.Context())()
	assert.True(t, result.IsLeft(errorRes))
	// Verify the error is preserved — same error instance, not a copy or wrap
	_, err := result.Unwrap(errorRes)
	assert.Equal(t, testErr, err)
}
// TestTapSLogChainBehavior tests that TapSLog properly chains with other operations:
// three taps interleaved with two multiplications must each log the value at
// their point in the pipeline while the final result is unaffected.
func TestTapSLogChainBehavior(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	// Create a pipeline with multiple TapSLog calls
	step1 := F.Pipe2(
		Of(1),
		TapSLog[int]("Step 1"),
		Map(N.Mul(2)),
	)
	step2 := F.Pipe2(
		step1,
		TapSLog[int]("Step 2"),
		Map(N.Mul(3)),
	)
	pipeline := F.Pipe1(
		step2,
		TapSLog[int]("Step 3"),
	)
	res := pipeline(t.Context())()
	// 1*2*3 = 6
	assert.Equal(t, result.Of(6), res)
	logOutput := buf.String()
	// Verify all steps were logged
	assert.Contains(t, logOutput, "Step 1")
	assert.Contains(t, logOutput, "value=1")
	assert.Contains(t, logOutput, "Step 2")
	assert.Contains(t, logOutput, "value=2")
	assert.Contains(t, logOutput, "Step 3")
	assert.Contains(t, logOutput, "value=6")
}
// TestTapSLogWithNilValue tests TapSLog with nil pointer values: a typed nil
// is a legitimate success value and must be logged without panicking or being
// converted into an error.
func TestTapSLogWithNilValue(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	type Data struct {
		Value string
	}
	// Test with nil pointer
	var nilData *Data
	operation := F.Pipe1(
		Of(nilData),
		TapSLog[*Data]("Nil pointer value"),
	)
	res := operation(t.Context())()
	assert.True(t, result.IsRight(res))
	logOutput := buf.String()
	assert.Contains(t, logOutput, "Nil pointer value")
	// The exact representation of nil may vary, but it should be logged
	assert.NotEmpty(t, logOutput)
}
// TestTapSLogLogsErrors verifies that TapSLog DOES log errors.
// TapSLog uses SLog internally, which logs both success values and errors,
// so an errored pipeline must produce a log record containing the message
// and the error text while the error itself is propagated unchanged.
func TestTapSLogLogsErrors(t *testing.T) {
	var buf bytes.Buffer
	logger := slog.New(slog.NewTextHandler(&buf, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	}))
	// Swap in the buffered logger; restore the previous global logger on exit.
	oldLogger := logging.SetLogger(logger)
	defer logging.SetLogger(oldLogger)
	testErr := errors.New("test error message")
	pipeline := F.Pipe2(
		Left[int](testErr),
		TapSLog[int]("Error logging test"),
		Map(N.Mul(2)),
	)
	res := pipeline(t.Context())()
	// Verify the error is preserved (Map is skipped on a Left)
	assert.True(t, result.IsLeft(res))
	// Verify logging occurred for the error
	logOutput := buf.String()
	assert.NotEmpty(t, logOutput, "TapSLog should log when the Result is an error")
	assert.Contains(t, logOutput, "Error logging test")
	assert.Contains(t, logOutput, "error")
	assert.Contains(t, logOutput, "test error message")
}

View File

@@ -22,6 +22,7 @@ import (
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioresult"
"github.com/IBM/fp-go/v2/pair"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/result"
)
@@ -38,21 +39,24 @@ import (
// The error type is fixed as error and remains unchanged through the transformation.
//
// Type Parameters:
// - R: The input environment type that f transforms into context.Context
// - A: The original success type produced by the ReaderIOResult
// - B: The new output success type
//
// Parameters:
// - f: Function to transform the input context (contravariant)
// - f: Function to transform the input environment R into context.Context (contravariant)
// - g: Function to transform the output success value from A to B (covariant)
//
// Returns:
// - An Operator that takes a ReaderIOResult[A] and returns a ReaderIOResult[B]
// - A Kleisli arrow that takes a ReaderIOResult[A] and returns a function from R to B
//
// Note: When R is context.Context, this simplifies to an Operator[A, B]
//
//go:inline
func Promap[A, B any](f pair.Kleisli[context.CancelFunc, context.Context, context.Context], g func(A) B) Operator[A, B] {
func Promap[R, A, B any](f pair.Kleisli[context.CancelFunc, R, context.Context], g func(A) B) RIOR.Kleisli[R, ReaderIOResult[A], B] {
return function.Flow2(
Local[A](f),
Map(g),
RIOR.Map[R](g),
)
}
@@ -66,18 +70,41 @@ func Promap[A, B any](f pair.Kleisli[context.CancelFunc, context.Context, contex
//
// Type Parameters:
// - A: The success type (unchanged)
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: Function to transform the context, returning a new context and CancelFunc
// - f: Function to transform the input environment R into context.Context, returning a new context and CancelFunc
//
// Returns:
// - A Kleisli arrow that takes a ReaderIOResult[A] and returns a function from R to A
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
//
//go:inline
func Contramap[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RIOR.Kleisli[R, ReaderIOResult[A], A] {
	// Pure alias for Local: only the input environment is adapted (contravariant);
	// the success type A and the error channel are left untouched.
	return Local[A](f)
}
// ContramapIOK changes the context during the execution of a ReaderIOResult using an IO effect.
// This is the contravariant functor operation with IO effects.
//
// ContramapIOK is an alias for LocalIOK and is useful for adapting a ReaderIOResult to work with
// a modified context when the transformation itself requires side effects.
//
// Type Parameters:
// - A: The success type (unchanged)
//
// Parameters:
// - f: An IO Kleisli arrow that transforms the context with side effects
//
// Returns:
// - An Operator that takes a ReaderIOResult[A] and returns a ReaderIOResult[A]
//
// See Also:
// - Contramap: For pure context transformations
// - LocalIOK: The underlying implementation
//
//go:inline
func Contramap[A any](f pair.Kleisli[context.CancelFunc, context.Context, context.Context]) Operator[A, A] {
return Local[A](f)
}
func ContramapIOK[A any](f io.Kleisli[context.Context, ContextCancel]) Operator[A, A] {
	// Effectful alias for LocalIOK: the context transformation itself runs as an IO,
	// yielding the new context paired with its CancelFunc.
	return LocalIOK[A](f)
}
@@ -189,8 +216,6 @@ func LocalIOK[A any](f io.Kleisli[context.Context, ContextCancel]) Operator[A, A
//
// - Local: For pure context transformations
// - LocalIOK: For context transformations with side effects that cannot fail
//
//go:inline
func LocalIOResultK[A any](f ioresult.Kleisli[context.Context, ContextCancel]) Operator[A, A] {
return func(rr ReaderIOResult[A]) ReaderIOResult[A] {
return func(ctx context.Context) IOResult[A] {

View File

@@ -77,6 +77,105 @@ func TestContramapBasic(t *testing.T) {
})
}
// TestContramapIOK tests ContramapIOK functionality: context transformation via
// an IO effect, value-type preservation, invocation of the returned CancelFunc,
// and error propagation when the incoming context is already cancelled.
//
// NOTE(review): these subtests use plain string context keys ("requestID",
// "counter", "data"), which go vet flags (SA1029); a private key type would be
// cleaner — confirm against the package's conventions.
func TestContramapIOK(t *testing.T) {
	t.Run("transforms context with IO effect", func(t *testing.T) {
		// Reads the request ID back out of the (transformed) context.
		getValue := func(ctx context.Context) IOResult[string] {
			return func() R.Result[string] {
				if v := ctx.Value("requestID"); v != nil {
					return R.Of(v.(string))
				}
				return R.Of("no-id")
			}
		}
		addRequestID := func(ctx context.Context) io.IO[ContextCancel] {
			return func() ContextCancel {
				// Simulate generating a request ID via side effect
				requestID := "req-12345"
				newCtx := context.WithValue(ctx, "requestID", requestID)
				return pair.MakePair(context.CancelFunc(func() {}), newCtx)
			}
		}
		adapted := ContramapIOK[string](addRequestID)(getValue)
		result := adapted(t.Context())()
		assert.Equal(t, R.Of("req-12345"), result)
	})
	t.Run("preserves value type", func(t *testing.T) {
		getValue := func(ctx context.Context) IOResult[int] {
			return func() R.Result[int] {
				if v := ctx.Value("counter"); v != nil {
					return R.Of(v.(int))
				}
				return R.Of(0)
			}
		}
		addCounter := func(ctx context.Context) io.IO[ContextCancel] {
			return func() ContextCancel {
				newCtx := context.WithValue(ctx, "counter", 999)
				return pair.MakePair(context.CancelFunc(func() {}), newCtx)
			}
		}
		adapted := ContramapIOK[int](addCounter)(getValue)
		result := adapted(t.Context())()
		assert.Equal(t, R.Of(999), result)
	})
	t.Run("calls cancel function", func(t *testing.T) {
		// The operator must release the derived context by invoking its CancelFunc.
		cancelCalled := false
		getValue := func(ctx context.Context) IOResult[string] {
			return func() R.Result[string] {
				return R.Of("test")
			}
		}
		addData := func(ctx context.Context) io.IO[ContextCancel] {
			return func() ContextCancel {
				newCtx := context.WithValue(ctx, "data", "value")
				cancelFunc := context.CancelFunc(func() {
					cancelCalled = true
				})
				return pair.MakePair(cancelFunc, newCtx)
			}
		}
		adapted := ContramapIOK[string](addData)(getValue)
		_ = adapted(t.Context())()
		assert.True(t, cancelCalled, "cancel function should be called")
	})
	t.Run("handles cancelled context", func(t *testing.T) {
		getValue := func(ctx context.Context) IOResult[string] {
			return func() R.Result[string] {
				return R.Of("should not reach here")
			}
		}
		addData := func(ctx context.Context) io.IO[ContextCancel] {
			return func() ContextCancel {
				newCtx := context.WithValue(ctx, "data", "value")
				return pair.MakePair(context.CancelFunc(func() {}), newCtx)
			}
		}
		// Cancel before running: the adapted computation must surface a Left.
		ctx, cancel := context.WithCancel(t.Context())
		cancel()
		adapted := ContramapIOK[string](addData)(getValue)
		result := adapted(ctx)()
		assert.True(t, R.IsLeft(result))
	})
}
// TestLocalBasic tests basic Local functionality
func TestLocalBasic(t *testing.T) {
t.Run("adds value to context", func(t *testing.T) {

View File

@@ -32,7 +32,6 @@ import (
"github.com/IBM/fp-go/v2/reader"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/readeroption"
"github.com/IBM/fp-go/v2/result"
)
const (
@@ -1011,12 +1010,15 @@ func TapLeftIOK[A, B any](f io.Kleisli[error, B]) Operator[A, A] {
//
// Type Parameters:
// - A: The value type of the ReaderIOResult
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: A function that transforms the context and returns a cancel function
// - f: A function that transforms the input environment R into context.Context and returns a cancel function
//
// Returns:
// - An Operator that runs the computation with the transformed context
// - A Kleisli arrow that runs the computation with the transformed context
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
//
// Example:
//
@@ -1026,9 +1028,9 @@ func TapLeftIOK[A, B any](f io.Kleisli[error, B]) Operator[A, A] {
// type key int
// const userKey key = 0
//
// addUser := readerioresult.Local[string](func(ctx context.Context) (context.Context, context.CancelFunc) {
// addUser := readerioresult.Local[string, context.Context](func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
// newCtx := context.WithValue(ctx, userKey, "Alice")
// return newCtx, func() {} // No-op cancel
// return pair.MakePair(func() {}, newCtx) // No-op cancel
// })
//
// getUser := readerioresult.FromReader(func(ctx context.Context) string {
@@ -1047,27 +1049,19 @@ func TapLeftIOK[A, B any](f io.Kleisli[error, B]) Operator[A, A] {
// Timeout Example:
//
// // Add a 5-second timeout to a specific operation
// withTimeout := readerioresult.Local[Data](func(ctx context.Context) (context.Context, context.CancelFunc) {
// return context.WithTimeout(ctx, 5*time.Second)
// withTimeout := readerioresult.Local[Data, context.Context](func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
// newCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
// return pair.MakePair(cancel, newCtx)
// })
//
// result := F.Pipe1(
// fetchData,
// withTimeout,
// )
func Local[A any](f pair.Kleisli[context.CancelFunc, context.Context, context.Context]) Operator[A, A] {
return func(rr ReaderIOResult[A]) ReaderIOResult[A] {
return func(ctx context.Context) IOResult[A] {
return func() Result[A] {
if ctx.Err() != nil {
return result.Left[A](context.Cause(ctx))
}
otherCancel, otherCtx := pair.Unpack(f(ctx))
defer otherCancel()
return rr(otherCtx)()
}
}
}
//
//go:inline
func Local[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RIOR.Kleisli[R, ReaderIOResult[A], A] {
return readerio.Local[Result[A]](f)
}
// WithTimeout adds a timeout to the context for a ReaderIOResult computation.

View File

@@ -3,6 +3,7 @@ package readerreaderioresult
import (
"context"
"github.com/IBM/fp-go/v2/context/reader"
"github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioresult"
@@ -13,6 +14,17 @@ import (
// Local modifies the outer environment before passing it to a computation.
// Useful for providing different configurations to sub-computations.
//
// Type Parameters:
// - A: The success type produced by the ReaderReaderIOResult
// - R1: The original outer environment type expected by the ReaderReaderIOResult
// - R2: The new input outer environment type
//
// Parameters:
// - f: A function that transforms R2 to R1
//
// Returns:
// - A function that takes a ReaderReaderIOResult[R1, A] and returns a ReaderReaderIOResult[R2, A]
//
//go:inline
func Local[A, R1, R2 any](f func(R2) R1) func(ReaderReaderIOResult[R1, A]) ReaderReaderIOResult[R2, A] {
return RRIOE.Local[context.Context, error, A](f)
@@ -102,6 +114,29 @@ func LocalIOResultK[A, R1, R2 any](f ioresult.Kleisli[R2, R1]) func(ReaderReader
return RRIOE.LocalIOEitherK[context.Context, A](f)
}
// LocalResultK transforms the outer environment of a ReaderReaderIOResult using a Result-based Kleisli arrow.
// It allows you to modify the outer environment through a pure computation that can fail before
// passing it to the ReaderReaderIOResult.
//
// This is useful when the outer environment transformation is a pure computation that can fail,
// such as parsing, validation, or data transformation that doesn't require IO effects.
//
// The transformation happens in two stages:
// 1. The Result function f is executed with the R2 environment to produce Result[R1]
// 2. If successful (Ok), the R1 value is passed as the outer environment to the ReaderReaderIOResult[R1, A]
// 3. If failed (Err), the error is propagated without executing the ReaderReaderIOResult
//
// Type Parameters:
// - A: The success type produced by the ReaderReaderIOResult
// - R1: The original outer environment type expected by the ReaderReaderIOResult
// - R2: The new input outer environment type
//
// Parameters:
// - f: A Result Kleisli arrow that transforms R2 to R1 with pure computation that can fail
//
// Returns:
// - A function that takes a ReaderReaderIOResult[R1, A] and returns a ReaderReaderIOResult[R2, A]
//
//go:inline
func LocalResultK[A, R1, R2 any](f result.Kleisli[R2, R1]) func(ReaderReaderIOResult[R1, A]) ReaderReaderIOResult[R2, A] {
return RRIOE.LocalEitherK[context.Context, A](f)
@@ -162,6 +197,90 @@ func LocalReaderIOResultK[A, R1, R2 any](f readerioresult.Kleisli[R2, R1]) func(
return RRIOE.LocalReaderIOEitherK[A](f)
}
// LocalReaderK adapts the outer environment of a ReaderReaderIOResult through a
// pure, context-aware transformation.
//
// The supplied Reader Kleisli arrow f receives the new outer environment R2 and
// the inner context (context.Context) and computes the original environment R1.
// Because f is a plain Reader it cannot fail and performs no IO; it is suited to
// cases such as:
//   - reading configuration values stored in the inner context
//   - deriving environment data from context metadata
//
// Evaluation proceeds in two steps:
//  1. f is run with the R2 environment and the inner context, yielding an R1 value
//  2. that R1 value becomes the outer environment of the wrapped ReaderReaderIOResult[R1, A]
//
// Type Parameters:
//   - A: The success type produced by the ReaderReaderIOResult
//   - R1: The outer environment type required by the wrapped computation
//   - R2: The outer environment type accepted by the adapted computation
//
// Parameters:
//   - f: A Reader Kleisli arrow turning R2 into R1 with access to the inner context
//
// Returns:
//   - A function converting a ReaderReaderIOResult[R1, A] into a ReaderReaderIOResult[R2, A]
//
// Example Usage:
//
//	type ctxKey string
//	const configKey ctxKey = "config"
//
//	// Extract config from context and transform environment
//	extractConfig := func(path string) reader.Reader[DetailedConfig] {
//		return func(ctx context.Context) DetailedConfig {
//			if cfg, ok := ctx.Value(configKey).(DetailedConfig); ok {
//				return cfg
//			}
//			return DetailedConfig{Host: "localhost", Port: 8080}
//		}
//	}
//
//	// Use the config
//	useConfig := func(cfg DetailedConfig) readerioresult.ReaderIOResult[string] {
//		return func(ctx context.Context) ioresult.IOResult[string] {
//			return func() result.Result[string] {
//				return result.Of(fmt.Sprintf("%s:%d", cfg.Host, cfg.Port))
//			}
//		}
//	}
//
//	// Compose using LocalReaderK
//	adapted := LocalReaderK[string](extractConfig)(useConfig)
//	ctx := context.WithValue(context.Background(), configKey, DetailedConfig{Host: "api.example.com", Port: 443})
//	result := adapted("config.json")(ctx)() // Result: "api.example.com:443"
//
//go:inline
func LocalReaderK[A, R1, R2 any](f reader.Kleisli[R2, R1]) func(ReaderReaderIOResult[R1, A]) ReaderReaderIOResult[R2, A] {
	// Delegate to the generic RRIOE implementation with the error channel fixed to error.
	return RRIOE.LocalReaderK[error, A](f)
}
// LocalReaderReaderIOEitherK transforms the outer environment of a ReaderReaderIOResult using a ReaderReaderIOResult-based Kleisli arrow.
// It allows you to modify the outer environment through a computation that depends on both the outer environment
// and the inner context, and can perform IO effects that may fail.
//
// This is the most powerful Local variant, useful when the outer environment transformation requires:
// - Access to both the outer environment (R2) and inner context (context.Context)
// - IO operations that can fail
// - Complex transformations that need the full computational context
//
// The transformation happens in three stages:
// 1. The ReaderReaderIOResult effect f is executed with the R2 outer environment and inner context
// 2. If successful (Ok), the R1 value is passed as the outer environment to the ReaderReaderIOResult[R1, A]
// 3. If failed (Err), the error is propagated without executing the ReaderReaderIOResult
//
// Type Parameters:
// - A: The success type produced by the ReaderReaderIOResult
// - R1: The original outer environment type expected by the ReaderReaderIOResult
// - R2: The new input outer environment type
//
// Parameters:
// - f: A ReaderReaderIOResult Kleisli arrow that transforms R2 to R1 with full context-aware IO effects that can fail
//
// Returns:
// - A function that takes a ReaderReaderIOResult[R1, A] and returns a ReaderReaderIOResult[R2, A]
//
//go:inline
func LocalReaderReaderIOEitherK[A, R1, R2 any](f Kleisli[R2, R2, R1]) func(ReaderReaderIOResult[R1, A]) ReaderReaderIOResult[R2, A] {
return RRIOE.LocalReaderReaderIOEitherK[A](f)

View File

@@ -21,6 +21,7 @@ import (
"fmt"
"testing"
"github.com/IBM/fp-go/v2/context/reader"
"github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/io"
"github.com/IBM/fp-go/v2/ioresult"
@@ -426,3 +427,226 @@ func TestLocalReaderIOResultK(t *testing.T) {
assert.True(t, result.IsLeft(resErr))
})
}
// TestLocalReaderK tests LocalReaderK functionality.
//
// LocalReaderK adapts the outer environment of a ReaderReaderIOResult through a
// pure, context-aware Reader Kleisli arrow (R2 -> Reader[context.Context, R1]).
// The subtests cover: a basic transformation, extraction of values from the
// runtime context, context-dependent computation, composition of two adapters,
// error propagation from the wrapped computation, and a realistic
// environment-selection scenario.
func TestLocalReaderK(t *testing.T) {
	// Shared base context for all subtests; individual subtests derive
	// value-carrying contexts from it via context.WithValue.
	ctx := context.Background()
	t.Run("basic Reader transformation", func(t *testing.T) {
		// Reader that transforms string path to SimpleConfig using context
		loadConfig := func(path string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				// Could extract values from context here
				return SimpleConfig{Port: 8080}
			}
		}
		// ReaderReaderIOResult that uses the config
		useConfig := func(cfg SimpleConfig) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Of(fmt.Sprintf("Port: %d", cfg.Port))
				}
			}
		}
		// Compose using LocalReaderK
		adapted := LocalReaderK[string](loadConfig)(useConfig)
		// Call chain: environment -> runtime context -> run the IO thunk.
		res := adapted("config.json")(ctx)()
		assert.Equal(t, result.Of("Port: 8080"), res)
	})
	t.Run("extract config from context", func(t *testing.T) {
		// Private key type avoids collisions with other context values.
		type ctxKey string
		const configKey ctxKey = "config"
		// Reader that extracts config from context
		extractConfig := func(path string) reader.Reader[DetailedConfig] {
			return func(ctx context.Context) DetailedConfig {
				if cfg, ok := ctx.Value(configKey).(DetailedConfig); ok {
					return cfg
				}
				// Default config if not in context
				return DetailedConfig{Host: "localhost", Port: 8080}
			}
		}
		// Use the config
		useConfig := func(cfg DetailedConfig) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Of(fmt.Sprintf("%s:%d", cfg.Host, cfg.Port))
				}
			}
		}
		adapted := LocalReaderK[string](extractConfig)(useConfig)
		// With context value
		ctxWithConfig := context.WithValue(ctx, configKey, DetailedConfig{Host: "api.example.com", Port: 443})
		res := adapted("ignored")(ctxWithConfig)()
		assert.Equal(t, result.Of("api.example.com:443"), res)
		// Without context value (uses default)
		resDefault := adapted("ignored")(ctx)()
		assert.Equal(t, result.Of("localhost:8080"), resDefault)
	})
	t.Run("context-aware transformation", func(t *testing.T) {
		type ctxKey string
		const multiplierKey ctxKey = "multiplier"
		// Reader that uses context to compute environment
		computeValue := func(base int) reader.Reader[int] {
			return func(ctx context.Context) int {
				if mult, ok := ctx.Value(multiplierKey).(int); ok {
					return base * mult
				}
				// No multiplier present: pass the base value through unchanged.
				return base
			}
		}
		// Use the computed value
		formatValue := func(val int) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Of(fmt.Sprintf("Value: %d", val))
				}
			}
		}
		adapted := LocalReaderK[string](computeValue)(formatValue)
		// With multiplier in context
		ctxWithMult := context.WithValue(ctx, multiplierKey, 10)
		res := adapted(5)(ctxWithMult)()
		assert.Equal(t, result.Of("Value: 50"), res)
		// Without multiplier (uses base value)
		resBase := adapted(5)(ctx)()
		assert.Equal(t, result.Of("Value: 5"), resBase)
	})
	t.Run("compose multiple LocalReaderK", func(t *testing.T) {
		type ctxKey string
		const prefixKey ctxKey = "prefix"
		// First transformation: int -> string using context
		intToString := func(n int) reader.Reader[string] {
			return func(ctx context.Context) string {
				if prefix, ok := ctx.Value(prefixKey).(string); ok {
					return fmt.Sprintf("%s-%d", prefix, n)
				}
				return fmt.Sprintf("%d", n)
			}
		}
		// Second transformation: string -> SimpleConfig
		stringToConfig := func(s string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				// Port is derived from the string length so the test can verify
				// which intermediate string was produced by the first stage.
				return SimpleConfig{Port: len(s) * 100}
			}
		}
		// Use the config
		formatConfig := func(cfg SimpleConfig) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Of(fmt.Sprintf("Port: %d", cfg.Port))
				}
			}
		}
		// Compose transformations: the outermost adapter (step2) runs first,
		// feeding its result into step1's adapter.
		step1 := LocalReaderK[string](stringToConfig)(formatConfig)
		step2 := LocalReaderK[string](intToString)(step1)
		// With prefix in context
		ctxWithPrefix := context.WithValue(ctx, prefixKey, "test")
		res := step2(42)(ctxWithPrefix)()
		// "test-42" has length 7, so port = 700
		assert.Equal(t, result.Of("Port: 700"), res)
		// Without prefix
		resNoPrefix := step2(42)(ctx)()
		// "42" has length 2, so port = 200
		assert.Equal(t, result.Of("Port: 200"), resNoPrefix)
	})
	t.Run("error propagation in ReaderReaderIOResult", func(t *testing.T) {
		// Reader transformation (pure, cannot fail)
		loadConfig := func(path string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				return SimpleConfig{Port: 8080}
			}
		}
		// ReaderReaderIOResult that returns an error
		failingOperation := func(cfg SimpleConfig) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Left[string](errors.New("operation failed"))
				}
			}
		}
		adapted := LocalReaderK[string](loadConfig)(failingOperation)
		res := adapted("config.json")(ctx)()
		// Error from the ReaderReaderIOResult should propagate
		assert.True(t, result.IsLeft(res))
	})
	t.Run("real-world: environment selection based on context", func(t *testing.T) {
		type Environment string
		const (
			Dev  Environment = "dev"
			Prod Environment = "prod"
		)
		type ctxKey string
		const envKey ctxKey = "environment"
		type EnvConfig struct {
			Name string
		}
		// Reader that selects config based on context environment
		selectConfig := func(envName EnvConfig) reader.Reader[DetailedConfig] {
			return func(ctx context.Context) DetailedConfig {
				// Fall back to Dev when no environment is stored in the context.
				env := Dev
				if e, ok := ctx.Value(envKey).(Environment); ok {
					env = e
				}
				switch env {
				case Prod:
					return DetailedConfig{Host: "api.production.com", Port: 443}
				default:
					return DetailedConfig{Host: "localhost", Port: 8080}
				}
			}
		}
		// Use the selected config
		useConfig := func(cfg DetailedConfig) readerioresult.ReaderIOResult[string] {
			return func(ctx context.Context) ioresult.IOResult[string] {
				return func() result.Result[string] {
					return result.Of(fmt.Sprintf("Connecting to %s:%d", cfg.Host, cfg.Port))
				}
			}
		}
		adapted := LocalReaderK[string](selectConfig)(useConfig)
		// Production environment
		ctxProd := context.WithValue(ctx, envKey, Prod)
		resProd := adapted(EnvConfig{Name: "app"})(ctxProd)()
		assert.Equal(t, result.Of("Connecting to api.production.com:443"), resProd)
		// Development environment (default)
		resDev := adapted(EnvConfig{Name: "app"})(ctx)()
		assert.Equal(t, result.Of("Connecting to localhost:8080"), resDev)
	})
}

View File

@@ -24,6 +24,7 @@ import (
"github.com/IBM/fp-go/v2/logging"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
@@ -104,7 +105,8 @@ func TestSLogWithContextLogger(t *testing.T) {
Level: slog.LevelInfo,
}))
ctx := logging.WithLogger(contextLogger)(t.Context())
cancelFct, ctx := pair.Unpack(logging.WithLogger(contextLogger)(t.Context()))
defer cancelFct()
res1 := result.Of("test value")
logged := SLog[string]("Context logger test")(res1)(ctx)

View File

@@ -19,6 +19,8 @@ import (
"context"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/pair"
RR "github.com/IBM/fp-go/v2/readerresult"
)
// Promap is the profunctor map operation that transforms both the input and output of a context-based ReaderResult.
@@ -34,21 +36,24 @@ import (
// The error type is fixed as error and remains unchanged through the transformation.
//
// Type Parameters:
// - R: The input environment type that f transforms into context.Context
// - A: The original success type produced by the ReaderResult
// - B: The new output success type
//
// Parameters:
// - f: Function to transform the input context (contravariant)
// - f: Function to transform the input environment R into context.Context (contravariant)
// - g: Function to transform the output success value from A to B (covariant)
//
// Returns:
// - An Operator that takes a ReaderResult[A] and returns a ReaderResult[B]
// - A Kleisli arrow that takes a ReaderResult[A] and returns a function from R to B
//
// Note: When R is context.Context, this simplifies to an Operator[A, B]
//
//go:inline
func Promap[A, B any](f func(context.Context) (context.Context, context.CancelFunc), g func(A) B) Operator[A, B] {
func Promap[R, A, B any](f pair.Kleisli[context.CancelFunc, R, context.Context], g func(A) B) RR.Kleisli[R, ReaderResult[A], B] {
return function.Flow2(
Local[A](f),
Map(g),
RR.Map[R](g),
)
}
@@ -62,15 +67,18 @@ func Promap[A, B any](f func(context.Context) (context.Context, context.CancelFu
//
// Type Parameters:
// - A: The success type (unchanged)
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: Function to transform the context, returning a new context and CancelFunc
// - f: Function to transform the input environment R into context.Context, returning a new context and CancelFunc
//
// Returns:
// - An Operator that takes a ReaderResult[A] and returns a ReaderResult[A]
// - A Kleisli arrow that takes a ReaderResult[A] and returns a function from R to A
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
//
//go:inline
func Contramap[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
func Contramap[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RR.Kleisli[R, ReaderResult[A], A] {
return Local[A](f)
}
@@ -89,16 +97,19 @@ func Contramap[A any](f func(context.Context) (context.Context, context.CancelFu
//
// Type Parameters:
// - A: The result type (unchanged)
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: Function to transform the context, returning a new context and CancelFunc
// - f: Function to transform the input environment R into context.Context, returning a new context and CancelFunc
//
// Returns:
// - An Operator that takes a ReaderResult[A] and returns a ReaderResult[A]
func Local[A any](f func(context.Context) (context.Context, context.CancelFunc)) Operator[A, A] {
return func(rr ReaderResult[A]) ReaderResult[A] {
return func(ctx context.Context) Result[A] {
otherCtx, otherCancel := f(ctx)
// - A Kleisli arrow that takes a ReaderResult[A] and returns a function from R to A
//
// Note: When R is context.Context, this simplifies to an Operator[A, A]
func Local[A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) RR.Kleisli[R, ReaderResult[A], A] {
return func(rr ReaderResult[A]) RR.ReaderResult[R, A] {
return func(r R) Result[A] {
otherCancel, otherCtx := pair.Unpack(f(r))
defer otherCancel()
return rr(otherCtx)
}

View File

@@ -20,6 +20,7 @@ import (
"strconv"
"testing"
"github.com/IBM/fp-go/v2/pair"
R "github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
)
@@ -34,9 +35,9 @@ func TestPromapBasic(t *testing.T) {
return R.Of(0)
}
addKey := func(ctx context.Context) (context.Context, context.CancelFunc) {
addKey := func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
newCtx := context.WithValue(ctx, "key", 42)
return newCtx, func() {}
return pair.MakePair[context.CancelFunc](func() {}, newCtx)
}
toString := strconv.Itoa
@@ -57,9 +58,9 @@ func TestContramapBasic(t *testing.T) {
return R.Of(0)
}
addKey := func(ctx context.Context) (context.Context, context.CancelFunc) {
addKey := func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
newCtx := context.WithValue(ctx, "key", 100)
return newCtx, func() {}
return pair.MakePair[context.CancelFunc](func() {}, newCtx)
}
adapted := Contramap[int](addKey)(getValue)
@@ -79,9 +80,9 @@ func TestLocalBasic(t *testing.T) {
return R.Of("unknown")
}
addUser := func(ctx context.Context) (context.Context, context.CancelFunc) {
addUser := func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
newCtx := context.WithValue(ctx, "user", "Alice")
return newCtx, func() {}
return pair.MakePair[context.CancelFunc](func() {}, newCtx)
}
adapted := Local[string](addUser)(getValue)

View File

@@ -21,8 +21,9 @@ import (
RIORES "github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/function"
"github.com/IBM/fp-go/v2/internal/statet"
RIOR "github.com/IBM/fp-go/v2/readerioresult"
"github.com/IBM/fp-go/v2/pair"
"github.com/IBM/fp-go/v2/result"
SRIOE "github.com/IBM/fp-go/v2/statereaderioeither"
)
// Left creates a StateReaderIOResult that represents a failed computation with the given error.
@@ -202,21 +203,42 @@ func FromResult[S, A any](ma Result[A]) StateReaderIOResult[S, A] {
// Combinators
// Local runs a computation with a modified context.
// The function f transforms the context before passing it to the computation.
// The function f transforms the context before passing it to the computation,
// returning both a new context and a CancelFunc that should be called to release resources.
//
// This is useful for:
// - Adding values to the context
// - Setting timeouts or deadlines
// - Modifying context metadata
//
// The CancelFunc is automatically called after the computation completes to ensure proper cleanup.
//
// Type Parameters:
// - S: The state type
// - A: The result type
// - R: The input environment type that f transforms into context.Context
//
// Parameters:
// - f: Function to transform the input environment R into context.Context, returning a new context and CancelFunc
//
// Returns:
// - A Kleisli arrow that takes a StateReaderIOResult[S, A] and returns a StateReaderIOEither[S, R, error, A]
//
// Note: When R is context.Context, the return type simplifies to func(StateReaderIOResult[S, A]) StateReaderIOResult[S, A]
//
// Example:
//
// // Modify context before running computation
// withTimeout := statereaderioresult.Local[AppState](
// func(ctx context.Context) context.Context {
// ctx, _ = context.WithTimeout(ctx, 60*time.Second)
// return ctx
// }
// // Add a timeout to a specific operation
// withTimeout := statereaderioresult.Local[AppState, Data, context.Context](
// func(ctx context.Context) pair.Pair[context.CancelFunc, context.Context] {
// newCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
// return pair.MakePair(cancel, newCtx)
// },
// )
// result := withTimeout(computation)
func Local[S, A any](f func(context.Context) context.Context) func(StateReaderIOResult[S, A]) StateReaderIOResult[S, A] {
return func(ma StateReaderIOResult[S, A]) StateReaderIOResult[S, A] {
return function.Flow2(ma, RIOR.Local[Pair[S, A]](f))
func Local[S, A, R any](f pair.Kleisli[context.CancelFunc, R, context.Context]) SRIOE.Kleisli[S, R, error, StateReaderIOResult[S, A], A] {
return func(ma StateReaderIOResult[S, A]) SRIOE.StateReaderIOEither[S, R, error, A] {
return function.Flow2(ma, RIORES.Local[Pair[S, A]](f))
}
}

View File

@@ -25,6 +25,7 @@ import (
"github.com/IBM/fp-go/v2/io"
IOR "github.com/IBM/fp-go/v2/ioresult"
N "github.com/IBM/fp-go/v2/number"
"github.com/IBM/fp-go/v2/pair"
P "github.com/IBM/fp-go/v2/pair"
RES "github.com/IBM/fp-go/v2/result"
"github.com/stretchr/testify/assert"
@@ -264,8 +265,8 @@ func TestLocal(t *testing.T) {
// Modify context before running computation
result := Local[testState, string](
func(c context.Context) context.Context {
return context.WithValue(c, "key", "value2")
func(c context.Context) ContextCancel {
return pair.MakePair[context.CancelFunc](func() {}, context.WithValue(c, "key", "value2"))
},
)(comp)

View File

@@ -16,6 +16,8 @@
package statereaderioresult
import (
"context"
RIORES "github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/endomorphism"
"github.com/IBM/fp-go/v2/io"
@@ -84,4 +86,11 @@ type (
Operator[S, A, B any] = Reader[StateReaderIOResult[S, A], StateReaderIOResult[S, B]]
Predicate[A any] = predicate.Predicate[A]
// ContextCancel represents a pair of a cancel function and a context.
// It is used in operations that create new contexts with cancellation capabilities.
//
// The first element is the CancelFunc that should be called to release resources.
// The second element is the new Context that was created.
ContextCancel = Pair[context.CancelFunc, context.Context]
)

View File

@@ -16,6 +16,7 @@
package effect
import (
"github.com/IBM/fp-go/v2/context/reader"
thunk "github.com/IBM/fp-go/v2/context/readerioresult"
"github.com/IBM/fp-go/v2/context/readerreaderioresult"
"github.com/IBM/fp-go/v2/io"
@@ -267,10 +268,89 @@ func LocalThunkK[A, C1, C2 any](f thunk.Kleisli[C2, C1]) func(Effect[C1, A]) Eff
// - Local/Contramap: Pure context transformation (C2 -> C1)
// - LocalIOK: IO-based transformation (C2 -> IO[C1])
// - LocalIOResultK: IO with error handling (C2 -> IOResult[C1])
// - LocalReaderIOResultK: Reader-based with IO and errors (C2 -> ReaderIOResult[C1])
// - LocalThunkK: Reader-based with IO and errors (C2 -> ReaderIOResult[C1])
// - LocalEffectK: Full Effect transformation (C2 -> Effect[C2, C1])
//
// LocalEffectK adapts an Effect[C1, A] to an Effect[C2, A] using a context
// transformation that is itself an Effect: f may consult both the C2 context
// and the runtime context, perform IO, and fail. If f fails, the error is
// propagated and the wrapped effect is not executed.
//
//go:inline
func LocalEffectK[A, C1, C2 any](f Kleisli[C2, C2, C1]) func(Effect[C1, A]) Effect[C2, A] {
	// Delegate to the underlying ReaderReaderIOResult combinator.
	return readerreaderioresult.LocalReaderReaderIOEitherK[A](f)
}
// LocalReaderK adapts an Effect to a different context type using a pure,
// runtime-context-aware transformation.
//
// The Reader Kleisli arrow f receives the new context value C2 together with
// the runtime context.Context and computes the context value C1 required by
// the original effect. Since f is a plain Reader it cannot fail and performs
// no IO. Typical uses:
//   - reading configuration stored in the runtime context
//   - deriving C1 values from runtime metadata
//
// Evaluation proceeds in two steps:
//  1. f is run with the C2 value and the runtime context, producing a C1 value
//  2. that C1 value is supplied as the context of the wrapped Effect[C1, A]
//
// # Type Parameters
//
//   - A: The value type produced by the effect
//   - C1: The context type required by the original effect
//   - C2: The context type accepted by the adapted effect
//
// # Parameters
//
//   - f: A Reader Kleisli arrow turning C2 into C1 with access to the runtime context
//
// # Returns
//
//   - func(Effect[C1, A]) Effect[C2, A]: A function that adapts the effect to use C2
//
// # Example
//
//	type ctxKey string
//	const configKey ctxKey = "config"
//
//	type DetailedConfig struct {
//		Host string
//		Port int
//	}
//
//	// Extract config from runtime context and transform
//	extractConfig := func(path string) reader.Reader[DetailedConfig] {
//		return func(ctx context.Context) DetailedConfig {
//			if cfg, ok := ctx.Value(configKey).(DetailedConfig); ok {
//				return cfg
//			}
//			return DetailedConfig{Host: "localhost", Port: 8080}
//		}
//	}
//
//	// Effect that uses DetailedConfig
//	configEffect := effect.Of[DetailedConfig]("connected")
//
//	// Transform to use string path instead
//	transform := effect.LocalReaderK[string](extractConfig)
//	pathEffect := transform(configEffect)
//
//	// Run with runtime context containing config
//	ctx := context.WithValue(context.Background(), configKey, DetailedConfig{Host: "api.example.com", Port: 443})
//	ioResult := effect.Provide[string]("config.json")(pathEffect)
//	readerResult := effect.RunSync(ioResult)
//	result, err := readerResult(ctx) // Uses config from context
//
// # Comparison with other Local functions
//
//   - Local/Contramap: Pure context transformation (C2 -> C1)
//   - LocalIOK: IO-based transformation (C2 -> IO[C1])
//   - LocalIOResultK: IO with error handling (C2 -> IOResult[C1])
//   - LocalReaderK: Reader-based pure transformation with runtime context access (C2 -> Reader[C1])
//   - LocalThunkK: Reader-based with IO and errors (C2 -> ReaderIOResult[C1])
//   - LocalEffectK: Full Effect transformation (C2 -> Effect[C2, C1])
//
//go:inline
func LocalReaderK[A, C1, C2 any](f reader.Kleisli[C2, C1]) func(Effect[C1, A]) Effect[C2, A] {
	// An Effect is a ReaderReaderIOResult over context.Context, so delegate directly.
	return readerreaderioresult.LocalReaderK[A](f)
}

View File

@@ -19,7 +19,9 @@ import (
"context"
"fmt"
"testing"
"time"
"github.com/IBM/fp-go/v2/context/reader"
"github.com/IBM/fp-go/v2/context/readerreaderioresult"
"github.com/stretchr/testify/assert"
)
@@ -618,3 +620,347 @@ func TestLocalEffectK(t *testing.T) {
assert.Equal(t, 60, result) // 3 * 10 * 2
})
}
// TestLocalReaderK verifies LocalReaderK: the supplied function maps the new
// environment value to a context-aware reader.Reader that produces the
// original environment, so the replacement environment can be derived from
// both the provided value and the runtime context.Context.
func TestLocalReaderK(t *testing.T) {
	t.Run("basic Reader transformation", func(t *testing.T) {
		type SimpleConfig struct {
			Port int
		}

		// Reader that transforms string path to SimpleConfig using runtime context
		loadConfig := func(path string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				// Could extract values from runtime context here
				return SimpleConfig{Port: 8080}
			}
		}

		// Effect that uses the config
		configEffect := Of[SimpleConfig]("connected")

		// Transform using LocalReaderK
		transform := LocalReaderK[string](loadConfig)
		pathEffect := transform(configEffect)

		// Run with path
		ioResult := Provide[string]("config.json")(pathEffect)
		readerResult := RunSync(ioResult)
		result, err := readerResult(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, "connected", result)
	})
	t.Run("extract config from runtime context", func(t *testing.T) {
		type ctxKey string
		const configKey ctxKey = "config"
		type DetailedConfig struct {
			Host string
			Port int
		}

		// Reader that extracts config from runtime context
		extractConfig := func(path string) reader.Reader[DetailedConfig] {
			return func(ctx context.Context) DetailedConfig {
				if cfg, ok := ctx.Value(configKey).(DetailedConfig); ok {
					return cfg
				}
				// Default config if not in runtime context
				return DetailedConfig{Host: "localhost", Port: 8080}
			}
		}

		// Effect that uses the config
		configEffect := Chain(func(cfg DetailedConfig) Effect[DetailedConfig, string] {
			return Of[DetailedConfig](fmt.Sprintf("%s:%d", cfg.Host, cfg.Port))
		})(readerreaderioresult.Ask[DetailedConfig]())

		transform := LocalReaderK[string](extractConfig)
		pathEffect := transform(configEffect)

		// With config in runtime context
		ctxWithConfig := context.WithValue(context.Background(), configKey, DetailedConfig{Host: "api.example.com", Port: 443})
		ioResult := Provide[string]("ignored")(pathEffect)
		readerResult := RunSync(ioResult)
		result, err := readerResult(ctxWithConfig)
		assert.NoError(t, err)
		assert.Equal(t, "api.example.com:443", result)

		// Without config in runtime context (uses default)
		ioResult2 := Provide[string]("ignored")(pathEffect)
		readerResult2 := RunSync(ioResult2)
		result2, err2 := readerResult2(context.Background())
		assert.NoError(t, err2)
		assert.Equal(t, "localhost:8080", result2)
	})
	t.Run("runtime context-aware transformation", func(t *testing.T) {
		type ctxKey string
		const multiplierKey ctxKey = "multiplier"

		// Reader that uses the runtime context to compute the value
		computeValue := func(base int) reader.Reader[int] {
			return func(ctx context.Context) int {
				if mult, ok := ctx.Value(multiplierKey).(int); ok {
					return base * mult
				}
				return base
			}
		}

		// Effect that uses the computed value
		valueEffect := Chain(func(val int) Effect[int, string] {
			return Of[int](fmt.Sprintf("Value: %d", val))
		})(readerreaderioresult.Ask[int]())

		transform := LocalReaderK[string](computeValue)
		baseEffect := transform(valueEffect)

		// With multiplier in runtime context
		ctxWithMult := context.WithValue(context.Background(), multiplierKey, 10)
		ioResult := Provide[string](5)(baseEffect)
		readerResult := RunSync(ioResult)
		result, err := readerResult(ctxWithMult)
		assert.NoError(t, err)
		assert.Equal(t, "Value: 50", result)

		// Without multiplier (uses base value)
		ioResult2 := Provide[string](5)(baseEffect)
		readerResult2 := RunSync(ioResult2)
		result2, err2 := readerResult2(context.Background())
		assert.NoError(t, err2)
		assert.Equal(t, "Value: 5", result2)
	})
	t.Run("compose multiple LocalReaderK", func(t *testing.T) {
		type ctxKey string
		const prefixKey ctxKey = "prefix"

		// First transformation: int -> string using runtime context
		intToString := func(n int) reader.Reader[string] {
			return func(ctx context.Context) string {
				if prefix, ok := ctx.Value(prefixKey).(string); ok {
					return fmt.Sprintf("%s-%d", prefix, n)
				}
				return fmt.Sprintf("%d", n)
			}
		}

		// Second transformation: string -> SimpleConfig
		type SimpleConfig struct {
			Port int
		}
		stringToConfig := func(s string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				return SimpleConfig{Port: len(s) * 100}
			}
		}

		// Effect that uses the config
		configEffect := Chain(func(cfg SimpleConfig) Effect[SimpleConfig, string] {
			return Of[SimpleConfig](fmt.Sprintf("Port: %d", cfg.Port))
		})(readerreaderioresult.Ask[SimpleConfig]())

		// Compose transformations
		step1 := LocalReaderK[string](stringToConfig)
		step2 := LocalReaderK[string](intToString)
		effect1 := step1(configEffect)
		effect2 := step2(effect1)

		// With prefix in runtime context
		ctxWithPrefix := context.WithValue(context.Background(), prefixKey, "test")
		ioResult := Provide[string](42)(effect2)
		readerResult := RunSync(ioResult)
		result, err := readerResult(ctxWithPrefix)
		assert.NoError(t, err)
		// "test-42" has length 7, so port = 700
		assert.Equal(t, "Port: 700", result)

		// Without prefix
		ioResult2 := Provide[string](42)(effect2)
		readerResult2 := RunSync(ioResult2)
		result2, err2 := readerResult2(context.Background())
		assert.NoError(t, err2)
		// "42" has length 2, so port = 200
		assert.Equal(t, "Port: 200", result2)
	})
	t.Run("error propagation from Effect", func(t *testing.T) {
		type SimpleConfig struct {
			Port int
		}

		// Reader transformation (pure, cannot fail)
		loadConfig := func(path string) reader.Reader[SimpleConfig] {
			return func(ctx context.Context) SimpleConfig {
				return SimpleConfig{Port: 8080}
			}
		}

		// Effect that returns an error
		expectedErr := assert.AnError
		failingEffect := Fail[SimpleConfig, string](expectedErr)

		transform := LocalReaderK[string](loadConfig)
		pathEffect := transform(failingEffect)

		ioResult := Provide[string]("config.json")(pathEffect)
		readerResult := RunSync(ioResult)
		_, err := readerResult(context.Background())
		// Error from the Effect should propagate
		assert.Error(t, err)
		assert.Equal(t, expectedErr, err)
	})
	t.Run("real-world: environment selection based on runtime context", func(t *testing.T) {
		type Environment string
		const (
			Dev  Environment = "dev"
			Prod Environment = "prod"
		)
		type ctxKey string
		const envKey ctxKey = "environment"
		type EnvConfig struct {
			Name string
		}
		type DetailedConfig struct {
			Host string
			Port int
		}

		// Reader that selects config based on runtime context environment
		selectConfig := func(envName EnvConfig) reader.Reader[DetailedConfig] {
			return func(ctx context.Context) DetailedConfig {
				env := Dev
				if e, ok := ctx.Value(envKey).(Environment); ok {
					env = e
				}
				switch env {
				case Prod:
					return DetailedConfig{Host: "api.production.com", Port: 443}
				default:
					return DetailedConfig{Host: "localhost", Port: 8080}
				}
			}
		}

		// Effect that uses the selected config
		configEffect := Chain(func(cfg DetailedConfig) Effect[DetailedConfig, string] {
			return Of[DetailedConfig](fmt.Sprintf("Connecting to %s:%d", cfg.Host, cfg.Port))
		})(readerreaderioresult.Ask[DetailedConfig]())

		transform := LocalReaderK[string](selectConfig)
		envEffect := transform(configEffect)

		// Production environment
		ctxProd := context.WithValue(context.Background(), envKey, Prod)
		ioResult := Provide[string](EnvConfig{Name: "app"})(envEffect)
		readerResult := RunSync(ioResult)
		result, err := readerResult(ctxProd)
		assert.NoError(t, err)
		assert.Equal(t, "Connecting to api.production.com:443", result)

		// Development environment (default)
		ioResult2 := Provide[string](EnvConfig{Name: "app"})(envEffect)
		readerResult2 := RunSync(ioResult2)
		result2, err2 := readerResult2(context.Background())
		assert.NoError(t, err2)
		assert.Equal(t, "Connecting to localhost:8080", result2)
	})
	t.Run("composes with other Local functions", func(t *testing.T) {
		type Level1 struct {
			Value string
		}
		type Level2 struct {
			Data string
		}
		type Level3 struct {
			Info string
		}

		// Effect at deepest level
		effect3 := Of[Level3]("result")

		// Use LocalReaderK for first transformation (with runtime context access)
		localReaderK23 := LocalReaderK[string](func(l2 Level2) reader.Reader[Level3] {
			return func(ctx context.Context) Level3 {
				return Level3{Info: l2.Data}
			}
		})

		// Use Local for second transformation (pure)
		local12 := Local[string](func(l1 Level1) Level2 {
			return Level2{Data: l1.Value}
		})

		// Compose them
		effect2 := localReaderK23(effect3)
		effect1 := local12(effect2)

		// Run
		ioResult := Provide[string](Level1{Value: "test"})(effect1)
		readerResult := RunSync(ioResult)
		result, err := readerResult(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, "result", result)
	})
	t.Run("runtime context deadline awareness", func(t *testing.T) {
		type Config struct {
			HasDeadline bool
		}

		// Reader that checks runtime context for deadline
		checkContext := func(path string) reader.Reader[Config] {
			return func(ctx context.Context) Config {
				_, hasDeadline := ctx.Deadline()
				return Config{HasDeadline: hasDeadline}
			}
		}

		// Effect that uses the config
		configEffect := Chain(func(cfg Config) Effect[Config, string] {
			return Of[Config](fmt.Sprintf("Has deadline: %v", cfg.HasDeadline))
		})(readerreaderioresult.Ask[Config]())

		transform := LocalReaderK[string](checkContext)
		pathEffect := transform(configEffect)

		// Without deadline
		ioResult := Provide[string]("config.json")(pathEffect)
		readerResult := RunSync(ioResult)
		result, err := readerResult(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, "Has deadline: false", result)

		// With deadline
		ctxWithDeadline, cancel := context.WithTimeout(context.Background(), time.Second)
		defer cancel()
		ioResult2 := Provide[string]("config.json")(pathEffect)
		readerResult2 := RunSync(ioResult2)
		result2, err2 := readerResult2(ctxWithDeadline)
		assert.NoError(t, err2)
		assert.Equal(t, "Has deadline: true", result2)
	})
}

View File

@@ -40,7 +40,7 @@
// increment := N.Add(1)
//
// // Compose them (RIGHT-TO-LEFT execution)
// composed := endomorphism.Compose(double, increment)
// composed := endomorphism.MonadCompose(double, increment)
// result := composed(5) // increment(5) then double: (5 + 1) * 2 = 12
//
// // Chain them (LEFT-TO-RIGHT execution)
@@ -61,11 +61,11 @@
// monoid := endomorphism.Monoid[int]()
//
// // Combine multiple endomorphisms (RIGHT-TO-LEFT execution)
// combined := M.ConcatAll(monoid)(
// combined := M.ConcatAll(monoid)([]endomorphism.Endomorphism[int]{
// N.Mul(2), // applied third
// N.Add(1), // applied second
// N.Mul(3), // applied first
// )
// })
// result := combined(5) // (5 * 3) = 15, (15 + 1) = 16, (16 * 2) = 32
//
// # Monad Operations
@@ -87,7 +87,7 @@
// increment := N.Add(1)
//
// // Compose: RIGHT-TO-LEFT (mathematical composition)
// composed := endomorphism.Compose(double, increment)
// composed := endomorphism.MonadCompose(double, increment)
// result1 := composed(5) // increment(5) * 2 = (5 + 1) * 2 = 12
//
// // MonadChain: LEFT-TO-RIGHT (sequential application)

View File

@@ -111,15 +111,19 @@ func MonadCompose[A any](f, g Endomorphism[A]) Endomorphism[A] {
// This is the functor map operation for endomorphisms.
//
// IMPORTANT: Execution order is RIGHT-TO-LEFT:
// - g is applied first to the input
// - ma is applied first to the input
// - f is applied to the result
//
// Note: unlike most other packages where MonadMap takes (fa, f) with the container
// first, here f (the morphism) comes first to match the right-to-left composition
// convention: MonadMap(f, ma) = f ∘ ma.
//
// Parameters:
// - f: The function to map (outer function)
// - g: The endomorphism to map over (inner function)
// - f: The function to map (outer function, applied second)
// - ma: The endomorphism to map over (inner function, applied first)
//
// Returns:
// - A new endomorphism that applies g, then f
// - A new endomorphism that applies ma, then f
//
// Example:
//
@@ -127,8 +131,8 @@ func MonadCompose[A any](f, g Endomorphism[A]) Endomorphism[A] {
// increment := N.Add(1)
// mapped := endomorphism.MonadMap(double, increment)
// // mapped(5) = double(increment(5)) = double(6) = 12
func MonadMap[A any](f, g Endomorphism[A]) Endomorphism[A] {
return MonadCompose(f, g)
func MonadMap[A any](f, ma Endomorphism[A]) Endomorphism[A] {
return MonadCompose(f, ma)
}
// Compose returns a function that composes an endomorphism with another, executing right to left.
@@ -386,3 +390,91 @@ func Join[A any](f Kleisli[A]) Endomorphism[A] {
return f(a)(a)
}
}
// Read captures a value and returns a function that applies endomorphisms to it.
//
// The returned function has the signature func(Endomorphism[A]) A: handed an
// endomorphism, it yields the result of applying that endomorphism to the
// captured value. This implements a "reader" pattern for endomorphisms and is
// useful for building reusable evaluation contexts in which several different
// transformations are evaluated against the same initial value.
//
// # Type Parameters
//
//   - A: The type of the value being captured and transformed
//
// # Parameters
//
//   - a: The value to capture for later transformation
//
// # Returns
//
//   - A function that applies endomorphisms to the captured value
//
// # Example - Basic Usage
//
//	// Capture a value
//	applyTo5 := Read(5)
//
//	// Apply different endomorphisms to the same value
//	doubled := applyTo5(N.Mul(2))                          // 10
//	incremented := applyTo5(N.Add(1))                      // 6
//	squared := applyTo5(func(x int) int { return x * x })  // 25
//
// # Example - Reusable Evaluation Context
//
//	type Config struct {
//		Timeout int
//		Retries int
//	}
//
//	baseConfig := Config{Timeout: 30, Retries: 3}
//	applyToBase := Read(baseConfig)
//
//	// Derive variations of the same base config
//	withLongTimeout := applyToBase(func(c Config) Config {
//		c.Timeout = 60
//		return c
//	})
//	withMoreRetries := applyToBase(func(c Config) Config {
//		c.Retries = 5
//		return c
//	})
//
// # Use Cases
//
//  1. **Testing**: Apply multiple transformations to the same test value
//  2. **Configuration**: Create variations of a base configuration
//  3. **Data Processing**: Evaluate different processing pipelines on the same data
//  4. **Benchmarking**: Compare different endomorphisms on the same input
//  5. **Functional Composition**: Build evaluation contexts for composed operations
//
// # Relationship to Other Functions
//
// Read is complementary to other endomorphism operations:
//   - Build applies an endomorphism to the zero value
//   - Read applies endomorphisms to a specific captured value
//   - Reduce applies multiple endomorphisms sequentially
//   - ConcatAll composes multiple endomorphisms
//
//go:inline
func Read[A any](a A) func(Endomorphism[A]) A {
	return func(transform Endomorphism[A]) A {
		return transform(a)
	}
}

View File

@@ -1071,3 +1071,226 @@ func TestReduceWithBuild(t *testing.T) {
assert.NotEqual(t, reduceResult, buildResult, "Reduce and Build(ConcatAll) produce different results due to execution order")
}
// TestRead tests the Read function, which captures a value and evaluates
// arbitrary endomorphisms against it.
func TestRead(t *testing.T) {
	t.Run("applies endomorphism to captured value", func(t *testing.T) {
		applyTo5 := Read(5)
		result := applyTo5(double)
		assert.Equal(t, 10, result, "Read should apply double to captured value 5")
		result2 := applyTo5(increment)
		assert.Equal(t, 6, result2, "Read should apply increment to captured value 5")
		result3 := applyTo5(square)
		assert.Equal(t, 25, result3, "Read should apply square to captured value 5")
	})
	t.Run("captures value for reuse", func(t *testing.T) {
		applyTo10 := Read(10)
		// Apply multiple different endomorphisms to the same captured value
		doubled := applyTo10(double)
		incremented := applyTo10(increment)
		negated := applyTo10(negate)
		assert.Equal(t, 20, doubled, "Should double 10")
		assert.Equal(t, 11, incremented, "Should increment 10")
		assert.Equal(t, -10, negated, "Should negate 10")
	})
	t.Run("works with identity", func(t *testing.T) {
		applyTo42 := Read(42)
		result := applyTo42(Identity[int]())
		assert.Equal(t, 42, result, "Read with identity should return original value")
	})
	t.Run("works with composed endomorphisms", func(t *testing.T) {
		applyTo5 := Read(5)
		// Compose: double then increment (RIGHT-TO-LEFT)
		composed := MonadCompose(increment, double)
		result := applyTo5(composed)
		assert.Equal(t, 11, result, "Read should work with composed endomorphisms: (5 * 2) + 1 = 11")
	})
	t.Run("works with chained endomorphisms", func(t *testing.T) {
		applyTo5 := Read(5)
		// Chain: double then increment (LEFT-TO-RIGHT)
		chained := MonadChain(double, increment)
		result := applyTo5(chained)
		assert.Equal(t, 11, result, "Read should work with chained endomorphisms: (5 * 2) + 1 = 11")
	})
	t.Run("works with ConcatAll", func(t *testing.T) {
		applyTo5 := Read(5)
		// ConcatAll composes RIGHT-TO-LEFT
		combined := ConcatAll([]Endomorphism[int]{double, increment, square})
		result := applyTo5(combined)
		// Execution: square(5) = 25, increment(25) = 26, double(26) = 52
		assert.Equal(t, 52, result, "Read should work with ConcatAll")
	})
	t.Run("works with different types", func(t *testing.T) {
		// Test with string.
		// FIX: renamed local from toUpper to appendWorld — the endomorphism
		// appends " WORLD"; it does not upper-case its input.
		applyToHello := Read("hello")
		appendWorld := func(s string) string { return s + " WORLD" }
		result := applyToHello(appendWorld)
		assert.Equal(t, "hello WORLD", result, "Read should work with strings")
		// Test with struct
		type Point struct {
			X, Y int
		}
		applyToPoint := Read(Point{X: 3, Y: 4})
		scaleX := func(p Point) Point {
			p.X *= 2
			return p
		}
		result2 := applyToPoint(scaleX)
		assert.Equal(t, Point{X: 6, Y: 4}, result2, "Read should work with structs")
	})
	t.Run("creates independent evaluation contexts", func(t *testing.T) {
		applyTo5 := Read(5)
		applyTo10 := Read(10)
		// Same endomorphism, different contexts
		result5 := applyTo5(double)
		result10 := applyTo10(double)
		assert.Equal(t, 10, result5, "First context should double 5")
		assert.Equal(t, 20, result10, "Second context should double 10")
	})
	t.Run("useful for testing transformations", func(t *testing.T) {
		testValue := 100
		applyToTest := Read(testValue)
		// Test multiple transformations on the same value
		transformations := []struct {
			name     string
			endo     Endomorphism[int]
			expected int
		}{
			{"double", double, 200},
			{"increment", increment, 101},
			{"negate", negate, -100},
			{"square", square, 10000},
		}
		for _, tt := range transformations {
			t.Run(tt.name, func(t *testing.T) {
				result := applyToTest(tt.endo)
				assert.Equal(t, tt.expected, result)
			})
		}
	})
	t.Run("works with monoid operations", func(t *testing.T) {
		applyTo5 := Read(5)
		// Use monoid to combine endomorphisms
		combined := M.ConcatAll(Monoid[int]())([]Endomorphism[int]{
			double,
			increment,
		})
		result := applyTo5(combined)
		// RIGHT-TO-LEFT: increment(5) = 6, double(6) = 12
		assert.Equal(t, 12, result, "Read should work with monoid operations")
	})
	t.Run("configuration example", func(t *testing.T) {
		type Config struct {
			Timeout int
			Retries int
		}
		baseConfig := Config{Timeout: 30, Retries: 3}
		applyToBase := Read(baseConfig)
		withLongTimeout := func(c Config) Config {
			c.Timeout = 60
			return c
		}
		withMoreRetries := func(c Config) Config {
			c.Retries = 5
			return c
		}
		result1 := applyToBase(withLongTimeout)
		assert.Equal(t, Config{Timeout: 60, Retries: 3}, result1)
		result2 := applyToBase(withMoreRetries)
		assert.Equal(t, Config{Timeout: 30, Retries: 5}, result2)
		// Original is unchanged
		result3 := applyToBase(Identity[Config]())
		assert.Equal(t, baseConfig, result3)
	})
}
// TestReadWithBuild tests the relationship between Read and Build
func TestReadWithBuild(t *testing.T) {
	t.Run("Read applies to specific value, Build to zero value", func(t *testing.T) {
		endo := double

		// Build evaluates the endomorphism at the zero value of int.
		assert.Equal(t, 0, Build(endo), "Build should apply to zero value: 0 * 2 = 0")

		// Read evaluates the same endomorphism at a captured value.
		assert.Equal(t, 10, Read(5)(endo), "Read should apply to captured value: 5 * 2 = 10")
	})
	t.Run("Read can evaluate Build results", func(t *testing.T) {
		// Compose an endomorphism pipeline (RIGHT-TO-LEFT).
		pipeline := ConcatAll([]Endomorphism[int]{double, increment})

		// At the zero value: increment(0) = 1, double(1) = 2.
		assert.Equal(t, 2, Build(pipeline))

		// At a captured value: increment(5) = 6, double(6) = 12.
		assert.Equal(t, 12, Read(5)(pipeline))
	})
}
// BenchmarkRead benchmarks the Read function
func BenchmarkRead(b *testing.B) {
	applyTo5 := Read(5)

	b.Run("simple endomorphism", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			_ = applyTo5(double)
		}
	})
	b.Run("composed endomorphism", func(b *testing.B) {
		// Hoist the composition out of the timed loop.
		composed := MonadCompose(double, increment)
		for n := 0; n < b.N; n++ {
			_ = applyTo5(composed)
		}
	})
	b.Run("ConcatAll endomorphism", func(b *testing.B) {
		combined := ConcatAll([]Endomorphism[int]{double, increment, square})
		for n := 0; n < b.N; n++ {
			_ = applyTo5(combined)
		}
	})
}

View File

@@ -144,8 +144,8 @@ func Semigroup[A any]() S.Semigroup[Endomorphism[A]] {
// square := func(x int) int { return x * x }
//
// // Combine multiple endomorphisms (RIGHT-TO-LEFT execution)
// combined := M.ConcatAll(monoid)(double, increment, square)
// result := combined(5) // square(increment(double(5))) = square(increment(10)) = square(11) = 121
// combined := M.ConcatAll(monoid)([]Endomorphism[int]{double, increment, square})
// result := combined(5) // double(increment(square(5))) = double(increment(25)) = double(26) = 52
func Monoid[A any]() M.Monoid[Endomorphism[A]] {
	// Right-to-left composition as the operation, with the identity
	// endomorphism as the neutral element.
	unit := Identity[A]()
	return M.MakeMonoid(MonadCompose[A], unit)
}

View File

@@ -41,20 +41,22 @@ type (
// It's a function from A to Endomorphism[A], used for composing endomorphic operations.
Kleisli[A any] = func(A) Endomorphism[A]
// Operator represents a transformation from one endomorphism to another.
// Operator represents a higher-order transformation on endomorphisms of the same type.
//
// An Operator takes an endomorphism on type A and produces an endomorphism on type B.
// This is useful for lifting operations or transforming endomorphisms in a generic way.
// An Operator takes an endomorphism on type A and produces another endomorphism on type A.
// Since Operator[A] = Endomorphism[Endomorphism[A]] = func(func(A)A) func(A)A,
// both the input and output endomorphisms operate on the same type A.
//
// This is the return type of curried operations such as Compose, Map, and Chain.
//
// Example:
//
// // An operator that converts an int endomorphism to a string endomorphism
// intToString := func(f endomorphism.Endomorphism[int]) endomorphism.Endomorphism[string] {
// return func(s string) string {
// n, _ := strconv.Atoi(s)
// result := f(n)
// return strconv.Itoa(result)
// }
// // An operator that applies any endomorphism twice
// var applyTwice endomorphism.Operator[int] = func(f endomorphism.Endomorphism[int]) endomorphism.Endomorphism[int] {
// return func(x int) int { return f(f(x)) }
// }
// double := N.Mul(2)
// result := applyTwice(double) // double ∘ double
// // result(5) = double(double(5)) = double(10) = 20
Operator[A any] = Endomorphism[Endomorphism[A]]
)

View File

@@ -23,6 +23,8 @@ import (
"log"
"log/slog"
"sync/atomic"
"github.com/IBM/fp-go/v2/pair"
)
// LoggingCallbacks creates a pair of logging callback functions from the provided loggers.
@@ -128,6 +130,7 @@ var loggerInContextKey loggerInContextType
// logger.Info("Processing request")
// }
func GetLoggerFromContext(ctx context.Context) *slog.Logger {
// using idiomatic style to avoid import cycle
value, ok := ctx.Value(loggerInContextKey).(*slog.Logger)
if !ok {
return globalLogger.Load()
@@ -135,9 +138,11 @@ func GetLoggerFromContext(ctx context.Context) *slog.Logger {
return value
}
// WithLogger returns an endomorphism that adds a logger to a context.
// An endomorphism is a function that takes a value and returns a value of the same type.
// This function creates a context transformation that embeds the provided logger.
// noop is a do-nothing cancel function, used where a context.CancelFunc is
// required but there is nothing to cancel.
func noop() {}
// WithLogger returns a Kleisli arrow that adds a logger to a context.
// A Kleisli arrow transforms a context into a ContextCancel pair containing
// a no-op cancel function and the new context with the embedded logger.
//
// This is particularly useful in functional programming patterns where you want to
// compose context transformations, or when working with middleware that needs to
@@ -147,7 +152,7 @@ func GetLoggerFromContext(ctx context.Context) *slog.Logger {
// - l: The *slog.Logger to embed in the context
//
// Returns:
// - An Endomorphism[context.Context] function that adds the logger to a context
// - A Kleisli arrow (function from context.Context to ContextCancel) that adds the logger to a context
//
// Example:
//
@@ -156,13 +161,14 @@ func GetLoggerFromContext(ctx context.Context) *slog.Logger {
//
// // Apply it to a context
// ctx := context.Background()
// ctxWithLogger := addLogger(ctx)
// result := addLogger(ctx)
// ctxWithLogger := pair.Second(result)
//
// // Retrieve the logger later
// logger := GetLoggerFromContext(ctxWithLogger)
// logger.Info("Using context logger")
func WithLogger(l *slog.Logger) Endomorphism[context.Context] {
return func(ctx context.Context) context.Context {
return context.WithValue(ctx, loggerInContextKey, l)
func WithLogger(l *slog.Logger) pair.Kleisli[context.CancelFunc, context.Context, context.Context] {
return func(ctx context.Context) ContextCancel {
return pair.MakePair[context.CancelFunc](noop, context.WithValue(ctx, loggerInContextKey, l))
}
}

View File

@@ -17,10 +17,13 @@ package logging
import (
"bytes"
"context"
"log"
"log/slog"
"strings"
"testing"
"github.com/IBM/fp-go/v2/pair"
S "github.com/IBM/fp-go/v2/string"
)
@@ -288,3 +291,355 @@ func BenchmarkLoggingCallbacks_Logging(b *testing.B) {
infoLog("benchmark message %d", i)
}
}
// TestSetLogger_Success tests setting a new global logger and verifying it returns the old one.
func TestSetLogger_Success(t *testing.T) {
	// Restore the original global logger when the test finishes.
	previous := GetLogger()
	defer SetLogger(previous)

	// Install a replacement logger backed by a throwaway buffer.
	var sink bytes.Buffer
	replacement := slog.New(slog.NewTextHandler(&sink, nil))
	returned := SetLogger(replacement)

	if returned == nil {
		t.Error("Expected SetLogger to return the previous logger")
	}
	if GetLogger() != replacement {
		t.Error("Expected GetLogger to return the newly set logger")
	}
}
// TestSetLogger_Multiple tests setting logger multiple times.
func TestSetLogger_Multiple(t *testing.T) {
	// Restore the original global logger when the test finishes.
	original := GetLogger()
	defer SetLogger(original)

	// Helper producing independent buffer-backed loggers.
	newTextLogger := func() *slog.Logger {
		return slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
	}
	logger1, logger2, logger3 := newTextLogger(), newTextLogger(), newTextLogger()

	// First swap: logger1 becomes active.
	prev1 := SetLogger(logger1)
	if GetLogger() != logger1 {
		t.Error("Expected logger1 to be active")
	}

	// Second swap returns the previously active logger1.
	prev2 := SetLogger(logger2)
	if prev2 != logger1 {
		t.Error("Expected SetLogger to return logger1")
	}
	if GetLogger() != logger2 {
		t.Error("Expected logger2 to be active")
	}

	// Third swap returns logger2.
	prev3 := SetLogger(logger3)
	if prev3 != logger2 {
		t.Error("Expected SetLogger to return logger2")
	}
	if GetLogger() != logger3 {
		t.Error("Expected logger3 to be active")
	}

	// Putting back the pre-logger1 logger returns the last active logger3.
	restored := SetLogger(prev1)
	if restored != logger3 {
		t.Error("Expected SetLogger to return logger3")
	}
}
// TestGetLogger_Default tests that GetLogger returns a valid logger by default.
func TestGetLogger_Default(t *testing.T) {
	if GetLogger() == nil {
		t.Error("Expected GetLogger to return a non-nil logger")
	}

	// Swap in a buffer-backed logger and prove the returned logger is usable.
	var sink bytes.Buffer
	probe := slog.New(slog.NewTextHandler(&sink, nil))
	previous := SetLogger(probe)
	defer SetLogger(previous)

	GetLogger().Info("test message")
	if !strings.Contains(sink.String(), "test message") {
		t.Errorf("Expected logger to log message, got: %s", sink.String())
	}
}
// TestGetLogger_AfterSet tests that GetLogger returns the logger set by SetLogger.
func TestGetLogger_AfterSet(t *testing.T) {
	previous := GetLogger()
	defer SetLogger(previous)

	var sink bytes.Buffer
	custom := slog.New(slog.NewTextHandler(&sink, nil))
	SetLogger(custom)

	got := GetLogger()
	if got != custom {
		t.Error("Expected GetLogger to return the custom logger")
	}

	// Logging through the retrieved logger must land in the same buffer,
	// proving it is the identical instance.
	got.Info("test")
	if !strings.Contains(sink.String(), "test") {
		t.Error("Expected retrieved logger to be the same instance")
	}
}
// TestGetLoggerFromContext_WithLogger tests retrieving a logger from context.
func TestGetLoggerFromContext_WithLogger(t *testing.T) {
var buf bytes.Buffer
handler := slog.NewTextHandler(&buf, nil)
contextLogger := slog.New(handler)
// Create context with logger using WithLogger
ctx := context.Background()
kleisli := WithLogger(contextLogger)
result := kleisli(ctx)
ctxWithLogger := pair.Second(result)
// Retrieve logger from context
retrievedLogger := GetLoggerFromContext(ctxWithLogger)
if retrievedLogger != contextLogger {
t.Error("Expected to retrieve the context logger")
}
// Verify it's the same instance by logging
retrievedLogger.Info("context test")
if !strings.Contains(buf.String(), "context test") {
t.Error("Expected retrieved logger to be the same instance")
}
}
// TestGetLoggerFromContext_WithoutLogger tests that it returns global logger when context has no logger.
func TestGetLoggerFromContext_WithoutLogger(t *testing.T) {
	originalLogger := GetLogger()
	defer SetLogger(originalLogger)

	// FIX: the local is named fallback (not globalLogger) so it no longer
	// shadows the package-level globalLogger atomic value.
	var buf bytes.Buffer
	fallback := slog.New(slog.NewTextHandler(&buf, nil))
	SetLogger(fallback)

	// A context without an embedded logger must fall back to the global logger.
	ctx := context.Background()
	retrievedLogger := GetLoggerFromContext(ctx)
	if retrievedLogger != fallback {
		t.Error("Expected to retrieve the global logger when context has no logger")
	}

	// Verify it is the same instance by logging through it.
	retrievedLogger.Info("global test")
	if !strings.Contains(buf.String(), "global test") {
		t.Error("Expected retrieved logger to be the global logger")
	}
}
// TestGetLoggerFromContext_NilContext tests that a context value of the wrong
// type (failed type assertion) falls back to the global logger.
func TestGetLoggerFromContext_NilContext(t *testing.T) {
	originalLogger := GetLogger()
	defer SetLogger(originalLogger)

	// FIX: the local is named fallback (not globalLogger) so it no longer
	// shadows the package-level globalLogger atomic value.
	var buf bytes.Buffer
	fallback := slog.New(slog.NewTextHandler(&buf, nil))
	SetLogger(fallback)

	// Store a value of the wrong type under the logger key.
	ctx := context.WithValue(context.Background(), loggerInContextKey, "not a logger")

	// The failed type assertion must yield the global logger.
	retrievedLogger := GetLoggerFromContext(ctx)
	if retrievedLogger != fallback {
		t.Error("Expected to retrieve the global logger when context value is wrong type")
	}
}
// TestWithLogger_CreatesContextWithLogger tests that WithLogger adds logger to context.
func TestWithLogger_CreatesContextWithLogger(t *testing.T) {
	embedded := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))

	// Apply the Kleisli arrow and split the resulting ContextCancel pair.
	outcome := WithLogger(embedded)(context.Background())
	cancel := pair.First(outcome)
	enriched := pair.Second(outcome)

	if cancel == nil {
		t.Error("Expected cancel function to be non-nil")
	}
	if enriched == nil {
		t.Error("Expected new context to be non-nil")
	}

	// The embedded logger must be retrievable from the enriched context.
	if GetLoggerFromContext(enriched) != embedded {
		t.Error("Expected logger to be in the new context")
	}
}
// TestWithLogger_CancelFuncIsNoop tests that the cancel function is a no-op.
func TestWithLogger_CancelFuncIsNoop(t *testing.T) {
	arrow := WithLogger(slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil)))
	cancel := pair.First(arrow(context.Background()))

	// Invoking the returned cancel function must be safe.
	defer func() {
		if r := recover(); r != nil {
			t.Errorf("Cancel function panicked: %v", r)
		}
	}()
	cancel()
}
// TestWithLogger_PreservesOriginalContext tests that original context is not modified.
func TestWithLogger_PreservesOriginalContext(t *testing.T) {
	originalLogger := GetLogger()
	defer SetLogger(originalLogger)

	// FIX: the local is named fallback (not globalLogger) so it no longer
	// shadows the package-level globalLogger atomic value.
	var buf bytes.Buffer
	fallback := slog.New(slog.NewTextHandler(&buf, nil))
	SetLogger(fallback)

	testLogger := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
	kleisli := WithLogger(testLogger)

	// Original context without logger
	originalCtx := context.Background()

	// Apply transformation; the enriched context is the pair's second element.
	newCtx := pair.Second(kleisli(originalCtx))

	// The untouched context must still resolve to the global logger.
	if GetLoggerFromContext(originalCtx) != fallback {
		t.Error("Expected original context to still use global logger")
	}

	// The derived context carries the embedded test logger.
	if GetLoggerFromContext(newCtx) != testLogger {
		t.Error("Expected new context to have the test logger")
	}
}
// TestWithLogger_Composition tests composing multiple WithLogger calls.
func TestWithLogger_Composition(t *testing.T) {
logger1 := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
logger2 := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
kleisli1 := WithLogger(logger1)
kleisli2 := WithLogger(logger2)
ctx := context.Background()
// Apply first transformation
result1 := kleisli1(ctx)
ctx1 := pair.Second(result1)
// Verify first logger
if GetLoggerFromContext(ctx1) != logger1 {
t.Error("Expected first logger in context after first transformation")
}
// Apply second transformation (should override)
result2 := kleisli2(ctx1)
ctx2 := pair.Second(result2)
// Verify second logger (should override first)
if GetLoggerFromContext(ctx2) != logger2 {
t.Error("Expected second logger to override first logger")
}
}
// BenchmarkSetLogger measures the cost of repeatedly installing the
// global logger.
func BenchmarkSetLogger(b *testing.B) {
	l := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		SetLogger(l)
	}
}
// BenchmarkGetLogger measures the cost of reading the global logger.
func BenchmarkGetLogger(b *testing.B) {
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		GetLogger()
	}
}
// BenchmarkGetLoggerFromContext_WithLogger benchmarks retrieving logger from context.
func BenchmarkGetLoggerFromContext_WithLogger(b *testing.B) {
logger := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
kleisli := WithLogger(logger)
ctx := pair.Second(kleisli(context.Background()))
b.ResetTimer()
for i := 0; i < b.N; i++ {
GetLoggerFromContext(ctx)
}
}
// BenchmarkGetLoggerFromContext_WithoutLogger measures the fallback path:
// retrieving a logger from a context that carries none (the global logger
// is returned instead).
func BenchmarkGetLoggerFromContext_WithoutLogger(b *testing.B) {
	ctx := context.Background()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		GetLoggerFromContext(ctx)
	}
}
// BenchmarkWithLogger measures the cost of deriving a new context with an
// attached logger via the WithLogger Kleisli.
func BenchmarkWithLogger(b *testing.B) {
	l := slog.New(slog.NewTextHandler(&bytes.Buffer{}, nil))
	withLogger := WithLogger(l)
	ctx := context.Background()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		withLogger(ctx)
	}
}

View File

@@ -16,7 +16,10 @@
package logging
import (
"context"
"github.com/IBM/fp-go/v2/endomorphism"
"github.com/IBM/fp-go/v2/pair"
)
type (
@@ -39,4 +42,15 @@ type (
// ctx := context.Background()
// newCtx := addLogger(ctx) // Both ctx and newCtx are context.Context
Endomorphism[A any] = endomorphism.Endomorphism[A]
// Pair represents a tuple of two values of types A and B.
// It is used to group two related values together.
Pair[A, B any] = pair.Pair[A, B]
// ContextCancel represents a pair of a cancel function and a context.
// It is used in operations that create new contexts with cancellation capabilities.
//
// The first element is the CancelFunc that should be called to release resources.
// The second element is the new Context that was created.
ContextCancel = Pair[context.CancelFunc, context.Context]
)

View File

@@ -3,6 +3,7 @@ package validation
import (
"fmt"
"log/slog"
"strings"
A "github.com/IBM/fp-go/v2/array"
"github.com/IBM/fp-go/v2/either"
@@ -12,6 +13,11 @@ import (
// Returns a generic error message indicating this is a validation error.
// For detailed error information, use String() or Format() methods.
// toError converts the validation error to the error interface.
// It relies on *ValidationError implementing error via its Error method
// and is used as a mapping function when exposing collections of
// validation errors as []error.
//
// NOTE(review): a nil *ValidationError passed here would produce a
// non-nil error interface value (typed-nil trap) — assumed not to occur
// in practice; confirm callers never hold nil entries.
func toError(v *ValidationError) error {
	return v
}
// Error implements the error interface for ValidationError.
// Returns a generic error message.
func (v *ValidationError) Error() string {
@@ -34,44 +40,45 @@ func (v *ValidationError) String() string {
// It includes the context path, message, and optionally the cause error.
// Supports verbs: %s, %v, %+v (with additional details)
func (v *ValidationError) Format(s fmt.State, verb rune) {
// Build the context path
path := ""
for i, entry := range v.Context {
if i > 0 {
path += "."
}
if entry.Key != "" {
path += entry.Key
} else {
path += entry.Type
}
}
var result strings.Builder
// Start with the path if available
result := ""
if path != "" {
result = fmt.Sprintf("at %s: ", path)
// Build the context path
if len(v.Context) > 0 {
var path strings.Builder
for i, entry := range v.Context {
if i > 0 {
path.WriteString(".")
}
if entry.Key != "" {
path.WriteString(entry.Key)
} else {
path.WriteString(entry.Type)
}
}
result.WriteString("at ")
result.WriteString(path.String())
result.WriteString(": ")
}
// Add the message
result += v.Messsage
result.WriteString(v.Messsage)
// Add the cause if present
if v.Cause != nil {
if s.Flag('+') && verb == 'v' {
// Verbose format with detailed cause
result += fmt.Sprintf("\n caused by: %+v", v.Cause)
fmt.Fprintf(&result, "\n caused by: %+v", v.Cause)
} else {
result += fmt.Sprintf(" (caused by: %v)", v.Cause)
fmt.Fprintf(&result, " (caused by: %v)", v.Cause)
}
}
// Add value information for verbose format
if s.Flag('+') && verb == 'v' {
result += fmt.Sprintf("\n value: %#v", v.Value)
fmt.Fprintf(&result, "\n value: %#v", v.Value)
}
fmt.Fprint(s, result)
fmt.Fprint(s, result.String())
}
// LogValue implements the slog.LogValuer interface for ValidationError.
@@ -94,18 +101,18 @@ func (v *ValidationError) LogValue() slog.Value {
// Add context path if available
if len(v.Context) > 0 {
path := ""
var path strings.Builder
for i, entry := range v.Context {
if i > 0 {
path += "."
path.WriteString(".")
}
if entry.Key != "" {
path += entry.Key
path.WriteString(entry.Key)
} else {
path += entry.Type
path.WriteString(entry.Type)
}
}
attrs = append(attrs, slog.String("path", path))
attrs = append(attrs, slog.String("path", path.String()))
}
// Add cause if present
@@ -119,13 +126,14 @@ func (v *ValidationError) LogValue() slog.Value {
// Error implements the error interface for ValidationErrors.
// Returns a generic error message indicating validation errors occurred.
func (ve *validationErrors) Error() string {
if len(ve.errors) == 0 {
switch len(ve.errors) {
case 0:
return "ValidationErrors: no errors"
}
if len(ve.errors) == 1 {
case 1:
return "ValidationErrors: 1 error"
default:
return fmt.Sprintf("ValidationErrors: %d errors", len(ve.errors))
}
return fmt.Sprintf("ValidationErrors: %d errors", len(ve.errors))
}
// Unwrap returns the underlying cause error if present.
@@ -134,8 +142,31 @@ func (ve *validationErrors) Unwrap() error {
return ve.cause
}
// Errors implements the ErrorsProvider interface for validationErrors.
// It converts the internal collection of ValidationError pointers to a slice of error interfaces.
// This method enables uniform error extraction from validation error collections.
//
// The returned slice contains the same errors as the internal errors field,
// but typed as error interface values for compatibility with standard Go error handling.
//
// Returns:
//   - A slice of error interfaces, one for each ValidationError in the collection
//
// Example:
//
//	ve := &validationErrors{
//		errors: Errors{
//			&ValidationError{Messsage: "invalid email"},
//			&ValidationError{Messsage: "age must be positive"},
//		},
//	}
//	errs := ve.Errors()
//	// errs is []error with 2 elements, each implementing the error interface
//	for _, err := range errs {
//		fmt.Println(err.Error()) // "ValidationError"
//	}
func (ve *validationErrors) Errors() []error {
	// Map each *ValidationError to the error interface via toError.
	// NOTE: the body must not call ve.Errors() itself — that earlier
	// variant recursed infinitely and overflowed the stack.
	return A.MonadMap(ve.errors, toError)
}
// String returns a simple string representation of all validation errors.
@@ -145,16 +176,17 @@ func (ve *validationErrors) String() string {
return "ValidationErrors: no errors"
}
result := fmt.Sprintf("ValidationErrors (%d):\n", len(ve.errors))
var result strings.Builder
fmt.Fprintf(&result, "ValidationErrors (%d):\n", len(ve.errors))
for i, err := range ve.errors {
result += fmt.Sprintf(" [%d] %s\n", i, err.String())
fmt.Fprintf(&result, " [%d] %s\n", i, err.String())
}
if ve.cause != nil {
result += fmt.Sprintf(" caused by: %v\n", ve.cause)
fmt.Fprintf(&result, " caused by: %v\n", ve.cause)
}
return result
return result.String()
}
// Format implements fmt.Formatter for custom formatting of ValidationErrors.

View File

@@ -846,3 +846,142 @@ func TestLogValuerInterface(t *testing.T) {
var _ slog.LogValuer = (*validationErrors)(nil)
})
}
// TestValidationErrors_Errors tests the Errors() method implementation.
// It verifies the conversion of the internal ValidationError collection
// into a []error slice: emptiness, element count, preservation of error
// details, interoperability with errors.Is/errors.As, and that the
// source slice is left unmodified.
func TestValidationErrors_Errors(t *testing.T) {
	t.Run("returns empty slice for no errors", func(t *testing.T) {
		ve := &validationErrors{
			errors: Errors{},
		}
		errs := ve.Errors()
		assert.Empty(t, errs)
		assert.NotNil(t, errs)
	})
	t.Run("converts single ValidationError to error interface", func(t *testing.T) {
		ve := &validationErrors{
			errors: Errors{
				&ValidationError{Value: "test", Messsage: "invalid value"},
			},
		}
		errs := ve.Errors()
		require.Len(t, errs, 1)
		assert.Equal(t, "ValidationError", errs[0].Error())
	})
	t.Run("converts multiple ValidationErrors to error interfaces", func(t *testing.T) {
		ve := &validationErrors{
			errors: Errors{
				&ValidationError{Value: "test1", Messsage: "error 1"},
				&ValidationError{Value: "test2", Messsage: "error 2"},
				&ValidationError{Value: "test3", Messsage: "error 3"},
			},
		}
		errs := ve.Errors()
		require.Len(t, errs, 3)
		for _, err := range errs {
			assert.Equal(t, "ValidationError", err.Error())
		}
	})
	t.Run("preserves error details in converted errors", func(t *testing.T) {
		originalErr := &ValidationError{
			Value:    "abc",
			Context:  []ContextEntry{{Key: "field"}},
			Messsage: "invalid format",
			Cause:    errors.New("parse error"),
		}
		ve := &validationErrors{
			errors: Errors{originalErr},
		}
		errs := ve.Errors()
		require.Len(t, errs, 1)
		// Verify the error can be type-asserted back to ValidationError
		validationErr, ok := errs[0].(*ValidationError)
		require.True(t, ok)
		assert.Equal(t, "abc", validationErr.Value)
		assert.Equal(t, "invalid format", validationErr.Messsage)
		assert.NotNil(t, validationErr.Cause)
		assert.Len(t, validationErr.Context, 1)
	})
	t.Run("implements ErrorsProvider interface", func(t *testing.T) {
		ve := &validationErrors{
			errors: Errors{
				&ValidationError{Messsage: "error 1"},
				&ValidationError{Messsage: "error 2"},
			},
		}
		// Verify it implements ErrorsProvider
		var provider ErrorsProvider = ve
		errs := provider.Errors()
		assert.Len(t, errs, 2)
	})
	t.Run("returned errors are usable with standard error handling", func(t *testing.T) {
		cause := errors.New("underlying error")
		ve := &validationErrors{
			errors: Errors{
				&ValidationError{
					Value:    "test",
					Messsage: "validation failed",
					Cause:    cause,
				},
			},
		}
		errs := ve.Errors()
		require.Len(t, errs, 1)
		// Test with errors.Is
		assert.True(t, errors.Is(errs[0], cause))
		// Test with errors.As
		var validationErr *ValidationError
		assert.True(t, errors.As(errs[0], &validationErr))
		assert.Equal(t, "validation failed", validationErr.Messsage)
	})
	t.Run("does not modify original errors slice", func(t *testing.T) {
		originalErrors := Errors{
			&ValidationError{Value: "test1", Messsage: "error 1"},
			&ValidationError{Value: "test2", Messsage: "error 2"},
		}
		ve := &validationErrors{
			errors: originalErrors,
		}
		errs := ve.Errors()
		require.Len(t, errs, 2)
		// Original should be unchanged
		assert.Len(t, ve.errors, 2)
		assert.Equal(t, originalErrors, ve.errors)
	})
	t.Run("each error in slice is independent", func(t *testing.T) {
		ve := &validationErrors{
			errors: Errors{
				&ValidationError{Value: "test1", Messsage: "error 1"},
				&ValidationError{Value: "test2", Messsage: "error 2"},
			},
		}
		errs := ve.Errors()
		require.Len(t, errs, 2)
		// Verify each error is distinct
		err1, ok1 := errs[0].(*ValidationError)
		err2, ok2 := errs[1].(*ValidationError)
		require.True(t, ok1)
		require.True(t, ok2)
		assert.NotEqual(t, err1.Messsage, err2.Messsage)
		assert.NotEqual(t, err1.Value, err2.Value)
	})
}

View File

@@ -1,6 +1,7 @@
package readerio
import (
"github.com/IBM/fp-go/v2/function"
G "github.com/IBM/fp-go/v2/internal/bracket"
)
@@ -30,3 +31,10 @@ func Bracket[
release,
)
}
// WithResource brackets the execution of a resource-consuming Kleisli
// arrow: onCreate acquires the resource, the supplied Kleisli uses it,
// and onRelease disposes of it (its result is discarded via Ignore2of2).
// The implementation delegates to Bracket, with the release step ignoring
// the outcome of the main computation.
//
//go:inline
func WithResource[R, A, B, ANY any](
	onCreate ReaderIO[R, A], onRelease Kleisli[R, A, ANY]) Kleisli[R, Kleisli[R, A, B], B] {
	return function.Bind13of3(Bracket[R, A, B, ANY])(onCreate, function.Ignore2of2[B](onRelease))
}

View File

@@ -61,6 +61,18 @@ func LocalReaderIOEitherK[A, C, E, R1, R2 any](f readerioeither.Kleisli[C, E, R2
}
}
// LocalReaderK adapts the outer environment of a ReaderReaderIOEither
// from R1 to R2 using a reader-based Kleisli arrow f. The pipeline lifts
// f into the readerioeither monad (FromReader), maps the original
// computation over the produced R1 (Map), and flattens the nested
// result (Flatten), yielding a computation over R2.
//
//go:inline
func LocalReaderK[E, A, C, R1, R2 any](f reader.Kleisli[C, R2, R1]) func(ReaderReaderIOEither[R1, C, E, A]) ReaderReaderIOEither[R2, C, E, A] {
	return func(rri ReaderReaderIOEither[R1, C, E, A]) ReaderReaderIOEither[R2, C, E, A] {
		return F.Flow4(
			f,
			readerioeither.FromReader,
			readerioeither.Map[C, E](rri),
			readerioeither.Flatten,
		)
	}
}
//go:inline
func LocalReaderReaderIOEitherK[A, C, E, R1, R2 any](f Kleisli[R2, C, E, R2, R1]) func(ReaderReaderIOEither[R1, C, E, A]) ReaderReaderIOEither[R2, C, E, A] {
return func(rri ReaderReaderIOEither[R1, C, E, A]) ReaderReaderIOEither[R2, C, E, A] {

View File

@@ -38,21 +38,41 @@ func IsNonEmpty[M ~map[K]V, K comparable, V any](r M) bool {
}
// Keys returns the keys of the map r as a slice of type GK.
// The iteration order is non-deterministic (Go map iteration).
// For an empty or nil map the result is nil, which is a valid
// representation of an empty slice in Go.
func Keys[M ~map[K]V, GK ~[]K, K comparable, V any](r M) GK {
	// fast path: avoid any allocation for empty input
	if len(r) == 0 {
		return nil
	}
	// full implementation
	return collect[M, GK](r, F.First[K, V])
}
// Values returns the values of the map r as a slice of type GV.
// The iteration order is non-deterministic (Go map iteration).
// For an empty or nil map the result is nil, which is a valid
// representation of an empty slice in Go.
func Values[M ~map[K]V, GV ~[]V, K comparable, V any](r M) GV {
	// fast path: avoid any allocation for empty input
	if len(r) == 0 {
		return nil
	}
	// full implementation
	return collect[M, GV](r, F.Second[K, V])
}
// KeysOrd returns the keys of the map as a slice ordered according to
// the provided Ord instance o. For an empty or nil map the result is
// nil, which is a valid representation of an empty slice in Go.
func KeysOrd[M ~map[K]V, GK ~[]K, K comparable, V any](o ord.Ord[K]) func(r M) GK {
	return func(r M) GK {
		// fast path: avoid any allocation for empty input
		if len(r) == 0 {
			return nil
		}
		// full implementation: delegate to the ordered collector
		return collectOrd[M, GK](o, r, F.First[K, V])
	}
}
// ValuesOrd returns the values of the map as a slice, ordered by their
// keys according to the provided Ord instance o. For an empty or nil map
// the result is nil, which is a valid representation of an empty slice
// in Go.
func ValuesOrd[M ~map[K]V, GV ~[]V, K comparable, V any](o ord.Ord[K]) func(r M) GV {
	return func(r M) GV {
		// fast path: avoid any allocation for empty input
		if len(r) == 0 {
			return nil
		}
		// full implementation: delegate to the ordered collector
		return collectOrd[M, GV](o, r, F.Second[K, V])
	}
}
@@ -97,12 +117,18 @@ func collect[M ~map[K]V, GR ~[]R, K comparable, V, R any](r M, f func(K, V) R) G
}
// Collect applies f to every key/value pair of the map and returns the
// results as a slice. Iteration order is non-deterministic; use
// CollectOrd for deterministic, key-ordered output.
//
// NOTE(review): unlike Keys/Values/CollectOrd there is no empty-map fast
// path here — presumably deliberate so the result is always non-nil
// (the nil-map tests assert a non-nil slice from Collect); confirm
// against collect before adding one.
func Collect[M ~map[K]V, GR ~[]R, K comparable, V, R any](f func(K, V) R) func(M) GR {
	// full implementation
	return F.Bind2nd(collect[M, GR, K, V, R], f)
}
// CollectOrd applies f to every key/value pair of the map in the key
// order induced by the Ord instance o and returns the collected results.
// For an empty or nil map the result is nil, which is a valid
// representation of an empty slice in Go.
func CollectOrd[M ~map[K]V, GR ~[]R, K comparable, V, R any](o ord.Ord[K]) func(f func(K, V) R) func(M) GR {
	return func(f func(K, V) R) func(M) GR {
		return func(r M) GR {
			// fast path: avoid any allocation for empty input
			if len(r) == 0 {
				return nil
			}
			// full implementation
			return collectOrd[M, GR](o, r, f)
		}
	}
}
@@ -416,12 +442,22 @@ func duplicate[M ~map[K]V, K comparable, V any](r M) M {
}
// upsertAt returns a copy of r with the key k bound to the value v.
// The input map is never mutated.
func upsertAt[M ~map[K]V, K comparable, V any](r M, k K, v V) M {
	// fast path: no need to copy an empty map, build the singleton directly
	if len(r) == 0 {
		return Singleton[M](k, v)
	}
	// duplicate and update
	dup := duplicate(r)
	dup[k] = v
	return dup
}
func deleteAt[M ~map[K]V, K comparable, V any](r M, k K) M {
// fast path
if len(r) == 0 {
return r
}
// duplicate and update
dup := duplicate(r)
delete(dup, k)
return dup

View File

@@ -55,10 +55,16 @@ func IsNonEmpty[K comparable, V any](r Record[K, V]) bool {
// The order of keys is non-deterministic due to Go's map iteration behavior.
// Use KeysOrd if you need keys in a specific order.
//
// Note: The return value can be nil in case of an empty map, since nil is a
// valid representation of an empty slice in Go.
//
// Example:
//
// record := Record[string, int]{"a": 1, "b": 2, "c": 3}
// keys := Keys(record) // ["a", "b", "c"] in any order
//
// emptyRecord := Record[string, int]{}
// emptyKeys := Keys(emptyRecord) // nil or []string{}
func Keys[K comparable, V any](r Record[K, V]) []K {
return G.Keys[Record[K, V], []K](r)
}
@@ -68,10 +74,16 @@ func Keys[K comparable, V any](r Record[K, V]) []K {
// The order of values is non-deterministic due to Go's map iteration behavior.
// Use ValuesOrd if you need values ordered by their keys.
//
// Note: The return value can be nil in case of an empty map, since nil is a
// valid representation of an empty slice in Go.
//
// Example:
//
// record := Record[string, int]{"a": 1, "b": 2, "c": 3}
// values := Values(record) // [1, 2, 3] in any order
//
// emptyRecord := Record[string, int]{}
// emptyValues := Values(emptyRecord) // nil or []int{}
func Values[K comparable, V any](r Record[K, V]) []V {
return G.Values[Record[K, V], []V](r)
}
@@ -98,6 +110,9 @@ func Collect[K comparable, V, R any](f func(K, V) R) func(Record[K, V]) []R {
//
// Unlike Collect, this function guarantees the order of results based on key ordering.
//
// Note: The return value can be nil in case of an empty map, since nil is a
// valid representation of an empty slice in Go.
//
// Example:
//
// record := Record[string, int]{"c": 3, "a": 1, "b": 2}
@@ -105,6 +120,9 @@ func Collect[K comparable, V, R any](f func(K, V) R) func(Record[K, V]) []R {
// return fmt.Sprintf("%s=%d", k, v)
// })
// result := toStrings(record) // ["a=1", "b=2", "c=3"] (ordered by key)
//
// emptyRecord := Record[string, int]{}
// emptyResult := toStrings(emptyRecord) // nil or []string{}
func CollectOrd[V, R any, K comparable](o ord.Ord[K]) func(func(K, V) R) func(Record[K, V]) []R {
return G.CollectOrd[Record[K, V], []R](o)
}
@@ -458,11 +476,18 @@ func UpsertAt[K comparable, V any](k K, v V) Operator[K, V, V] {
// If the key doesn't exist, the record is returned unchanged.
// The original record is not modified; a new record is returned.
//
// In case of an empty input map (including nil maps), the identical map is returned,
// since deleting from an empty map is an idempotent operation.
//
// Example:
//
// record := Record[string, int]{"a": 1, "b": 2, "c": 3}
// removeB := DeleteAt[string, int]("b")
// result := removeB(record) // {"a": 1, "c": 3}
//
// // Deleting from empty map returns empty map
// emptyRecord := Record[string, int]{}
// result2 := removeB(emptyRecord) // {}
func DeleteAt[K comparable, V any](k K) Operator[K, V, V] {
return G.DeleteAt[Record[K, V]](k)
}

View File

@@ -0,0 +1,552 @@
// Copyright (c) 2023 - 2025 IBM Corp.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package record
import (
"fmt"
"testing"
O "github.com/IBM/fp-go/v2/option"
P "github.com/IBM/fp-go/v2/pair"
SG "github.com/IBM/fp-go/v2/semigroup"
S "github.com/IBM/fp-go/v2/string"
"github.com/stretchr/testify/assert"
)
// TestNilMap_IsEmpty verifies that IsEmpty handles nil maps correctly
func TestNilMap_IsEmpty(t *testing.T) {
var nilMap Record[string, int]
assert.True(t, IsEmpty(nilMap), "nil map should be empty")
}
// TestNilMap_IsNonEmpty verifies that IsNonEmpty handles nil maps correctly
func TestNilMap_IsNonEmpty(t *testing.T) {
var nilMap Record[string, int]
assert.False(t, IsNonEmpty(nilMap), "nil map should not be non-empty")
}
// TestNilMap_Keys verifies that Keys handles nil maps correctly
func TestNilMap_Keys(t *testing.T) {
var nilMap Record[string, int]
keys := Keys(nilMap)
// Keys can return nil for empty map, which is a valid representation of an empty slice
assert.Equal(t, 0, len(keys), "Keys should return empty slice for nil map")
}
// TestNilMap_Values verifies that Values handles nil maps correctly
func TestNilMap_Values(t *testing.T) {
var nilMap Record[string, int]
values := Values(nilMap)
// Values can return nil for empty map, which is a valid representation of an empty slice
assert.Equal(t, 0, len(values), "Values should return empty slice for nil map")
}
// TestNilMap_Collect verifies that Collect handles nil maps correctly
func TestNilMap_Collect(t *testing.T) {
var nilMap Record[string, int]
collector := Collect(func(k string, v int) string {
return fmt.Sprintf("%s=%d", k, v)
})
result := collector(nilMap)
assert.NotNil(t, result, "Collect should return non-nil slice")
assert.Equal(t, 0, len(result), "Collect should return empty slice for nil map")
}
// TestNilMap_Reduce verifies that Reduce handles nil maps correctly
func TestNilMap_Reduce(t *testing.T) {
var nilMap Record[string, int]
reducer := Reduce[string](func(acc int, v int) int {
return acc + v
}, 10)
result := reducer(nilMap)
assert.Equal(t, 10, result, "Reduce should return initial value for nil map")
}
// TestNilMap_ReduceWithIndex verifies that ReduceWithIndex handles nil maps correctly
func TestNilMap_ReduceWithIndex(t *testing.T) {
var nilMap Record[string, int]
reducer := ReduceWithIndex(func(k string, acc int, v int) int {
return acc + v
}, 10)
result := reducer(nilMap)
assert.Equal(t, 10, result, "ReduceWithIndex should return initial value for nil map")
}
// TestNilMap_ReduceRef verifies that ReduceRef handles nil maps correctly
func TestNilMap_ReduceRef(t *testing.T) {
var nilMap Record[string, int]
reducer := ReduceRef[string](func(acc int, v *int) int {
return acc + *v
}, 10)
result := reducer(nilMap)
assert.Equal(t, 10, result, "ReduceRef should return initial value for nil map")
}
// TestNilMap_ReduceRefWithIndex verifies that ReduceRefWithIndex handles nil maps correctly
func TestNilMap_ReduceRefWithIndex(t *testing.T) {
var nilMap Record[string, int]
reducer := ReduceRefWithIndex(func(k string, acc int, v *int) int {
return acc + *v
}, 10)
result := reducer(nilMap)
assert.Equal(t, 10, result, "ReduceRefWithIndex should return initial value for nil map")
}
// TestNilMap_MonadMap verifies that MonadMap handles nil maps correctly
func TestNilMap_MonadMap(t *testing.T) {
var nilMap Record[string, int]
result := MonadMap(nilMap, func(v int) string {
return fmt.Sprintf("%d", v)
})
assert.NotNil(t, result, "MonadMap should return non-nil map")
assert.Equal(t, 0, len(result), "MonadMap should return empty map for nil input")
}
// TestNilMap_MonadMapWithIndex verifies that MonadMapWithIndex handles nil maps correctly
func TestNilMap_MonadMapWithIndex(t *testing.T) {
var nilMap Record[string, int]
result := MonadMapWithIndex(nilMap, func(k string, v int) string {
return fmt.Sprintf("%s=%d", k, v)
})
assert.NotNil(t, result, "MonadMapWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "MonadMapWithIndex should return empty map for nil input")
}
// TestNilMap_MonadMapRefWithIndex verifies that MonadMapRefWithIndex handles nil maps correctly
func TestNilMap_MonadMapRefWithIndex(t *testing.T) {
var nilMap Record[string, int]
result := MonadMapRefWithIndex(nilMap, func(k string, v *int) string {
return fmt.Sprintf("%s=%d", k, *v)
})
assert.NotNil(t, result, "MonadMapRefWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "MonadMapRefWithIndex should return empty map for nil input")
}
// TestNilMap_MonadMapRef verifies that MonadMapRef handles nil maps correctly
func TestNilMap_MonadMapRef(t *testing.T) {
var nilMap Record[string, int]
result := MonadMapRef(nilMap, func(v *int) string {
return fmt.Sprintf("%d", *v)
})
assert.NotNil(t, result, "MonadMapRef should return non-nil map")
assert.Equal(t, 0, len(result), "MonadMapRef should return empty map for nil input")
}
// TestNilMap_Map verifies that Map handles nil maps correctly
func TestNilMap_Map(t *testing.T) {
var nilMap Record[string, int]
mapper := Map[string](func(v int) string {
return fmt.Sprintf("%d", v)
})
result := mapper(nilMap)
assert.NotNil(t, result, "Map should return non-nil map")
assert.Equal(t, 0, len(result), "Map should return empty map for nil input")
}
// TestNilMap_MapRef verifies that MapRef handles nil maps correctly
func TestNilMap_MapRef(t *testing.T) {
var nilMap Record[string, int]
mapper := MapRef[string](func(v *int) string {
return fmt.Sprintf("%d", *v)
})
result := mapper(nilMap)
assert.NotNil(t, result, "MapRef should return non-nil map")
assert.Equal(t, 0, len(result), "MapRef should return empty map for nil input")
}
// TestNilMap_MapWithIndex verifies that MapWithIndex handles nil maps correctly
func TestNilMap_MapWithIndex(t *testing.T) {
var nilMap Record[string, int]
mapper := MapWithIndex[string](func(k string, v int) string {
return fmt.Sprintf("%s=%d", k, v)
})
result := mapper(nilMap)
assert.NotNil(t, result, "MapWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "MapWithIndex should return empty map for nil input")
}
// TestNilMap_MapRefWithIndex verifies that MapRefWithIndex handles nil maps correctly
func TestNilMap_MapRefWithIndex(t *testing.T) {
var nilMap Record[string, int]
mapper := MapRefWithIndex[string](func(k string, v *int) string {
return fmt.Sprintf("%s=%d", k, *v)
})
result := mapper(nilMap)
assert.NotNil(t, result, "MapRefWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "MapRefWithIndex should return empty map for nil input")
}
// TestNilMap_Lookup verifies that Lookup handles nil maps correctly
func TestNilMap_Lookup(t *testing.T) {
var nilMap Record[string, int]
lookup := Lookup[int]("key")
result := lookup(nilMap)
assert.True(t, O.IsNone(result), "Lookup should return None for nil map")
}
// TestNilMap_MonadLookup verifies that MonadLookup handles nil maps correctly
func TestNilMap_MonadLookup(t *testing.T) {
var nilMap Record[string, int]
result := MonadLookup(nilMap, "key")
assert.True(t, O.IsNone(result), "MonadLookup should return None for nil map")
}
// TestNilMap_Has verifies that Has handles nil maps correctly
func TestNilMap_Has(t *testing.T) {
var nilMap Record[string, int]
result := Has("key", nilMap)
assert.False(t, result, "Has should return false for nil map")
}
// TestNilMap_Union verifies that Union handles nil maps correctly
func TestNilMap_Union(t *testing.T) {
var nilMap Record[string, int]
nonNilMap := Record[string, int]{"a": 1, "b": 2}
semigroup := SG.Last[int]()
union := Union[string](semigroup)
// nil union non-nil
result1 := union(nonNilMap)(nilMap)
assert.Equal(t, nonNilMap, result1, "nil union non-nil should return non-nil map")
// non-nil union nil
result2 := union(nilMap)(nonNilMap)
assert.Equal(t, nonNilMap, result2, "non-nil union nil should return non-nil map")
// nil union nil - returns nil when both inputs are nil (optimization)
result3 := union(nilMap)(nilMap)
assert.Nil(t, result3, "nil union nil returns nil")
}
// TestNilMap_Merge verifies that Merge handles nil maps correctly
func TestNilMap_Merge(t *testing.T) {
var nilMap Record[string, int]
nonNilMap := Record[string, int]{"a": 1, "b": 2}
// nil merge non-nil
result1 := Merge(nonNilMap)(nilMap)
assert.Equal(t, nonNilMap, result1, "nil merge non-nil should return non-nil map")
// non-nil merge nil
result2 := Merge(nilMap)(nonNilMap)
assert.Equal(t, nonNilMap, result2, "non-nil merge nil should return non-nil map")
// nil merge nil - returns nil when both inputs are nil (optimization)
result3 := Merge(nilMap)(nilMap)
assert.Nil(t, result3, "nil merge nil returns nil")
}
// TestNilMap_Size verifies that Size handles nil maps correctly
func TestNilMap_Size(t *testing.T) {
var nilMap Record[string, int]
result := Size(nilMap)
assert.Equal(t, 0, result, "Size should return 0 for nil map")
}
// TestNilMap_ToArray verifies that ToArray handles nil maps correctly
func TestNilMap_ToArray(t *testing.T) {
var nilMap Record[string, int]
result := ToArray(nilMap)
assert.NotNil(t, result, "ToArray should return non-nil slice")
assert.Equal(t, 0, len(result), "ToArray should return empty slice for nil map")
}
// TestNilMap_ToEntries verifies that ToEntries handles nil maps correctly
func TestNilMap_ToEntries(t *testing.T) {
var nilMap Record[string, int]
result := ToEntries(nilMap)
assert.NotNil(t, result, "ToEntries should return non-nil slice")
assert.Equal(t, 0, len(result), "ToEntries should return empty slice for nil map")
}
// TestNilMap_UpsertAt verifies that UpsertAt handles nil maps correctly
func TestNilMap_UpsertAt(t *testing.T) {
var nilMap Record[string, int]
upsert := UpsertAt("key", 42)
result := upsert(nilMap)
assert.NotNil(t, result, "UpsertAt should return non-nil map")
assert.Equal(t, 1, len(result), "UpsertAt should create map with one entry")
assert.Equal(t, 42, result["key"], "UpsertAt should insert value correctly")
}
// TestNilMap_DeleteAt verifies that DeleteAt handles nil maps correctly
func TestNilMap_DeleteAt(t *testing.T) {
var nilMap Record[string, int]
deleteFunc := DeleteAt[string, int]("key")
result := deleteFunc(nilMap)
// DeleteAt returns the identical map for nil input (idempotent operation)
assert.Nil(t, result, "DeleteAt should return nil for nil input (idempotent)")
assert.Equal(t, nilMap, result, "DeleteAt should return identical map for nil input")
// Verify that deleting from empty (non-nil) map returns identical map (idempotent)
emptyMap := Record[string, int]{}
result2 := deleteFunc(emptyMap)
assert.NotNil(t, result2, "DeleteAt should return non-nil map for empty input")
assert.Equal(t, 0, len(result2), "DeleteAt should return empty map for empty input")
assert.Equal(t, emptyMap, result2, "DeleteAt on empty map should be idempotent")
}
// TestNilMap_Filter verifies that Filter handles nil maps correctly
func TestNilMap_Filter(t *testing.T) {
var nilMap Record[string, int]
filter := Filter[string, int](func(k string) bool {
return true
})
result := filter(nilMap)
assert.NotNil(t, result, "Filter should return non-nil map")
assert.Equal(t, 0, len(result), "Filter should return empty map for nil input")
}
// TestNilMap_FilterWithIndex verifies that FilterWithIndex handles nil maps correctly
func TestNilMap_FilterWithIndex(t *testing.T) {
var nilMap Record[string, int]
filter := FilterWithIndex[string, int](func(k string, v int) bool {
return true
})
result := filter(nilMap)
assert.NotNil(t, result, "FilterWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "FilterWithIndex should return empty map for nil input")
}
// TestNilMap_IsNil verifies that IsNil handles nil maps correctly
func TestNilMap_IsNil(t *testing.T) {
var nilMap Record[string, int]
assert.True(t, IsNil(nilMap), "IsNil should return true for nil map")
nonNilMap := Record[string, int]{}
assert.False(t, IsNil(nonNilMap), "IsNil should return false for non-nil empty map")
}
// TestNilMap_IsNonNil verifies that IsNonNil handles nil maps correctly
func TestNilMap_IsNonNil(t *testing.T) {
var nilMap Record[string, int]
assert.False(t, IsNonNil(nilMap), "IsNonNil should return false for nil map")
nonNilMap := Record[string, int]{}
assert.True(t, IsNonNil(nonNilMap), "IsNonNil should return true for non-nil empty map")
}
// TestNilMap_MonadChainWithIndex verifies that MonadChainWithIndex handles nil maps correctly
func TestNilMap_MonadChainWithIndex(t *testing.T) {
var nilMap Record[string, int]
monoid := MergeMonoid[string, string]()
result := MonadChainWithIndex(monoid, nilMap, func(k string, v int) Record[string, string] {
return Record[string, string]{k: fmt.Sprintf("%d", v)}
})
assert.NotNil(t, result, "MonadChainWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "MonadChainWithIndex should return empty map for nil input")
}
// TestNilMap_MonadChain verifies that MonadChain handles nil maps correctly
func TestNilMap_MonadChain(t *testing.T) {
var nilMap Record[string, int]
monoid := MergeMonoid[string, string]()
result := MonadChain(monoid, nilMap, func(v int) Record[string, string] {
return Record[string, string]{"key": fmt.Sprintf("%d", v)}
})
assert.NotNil(t, result, "MonadChain should return non-nil map")
assert.Equal(t, 0, len(result), "MonadChain should return empty map for nil input")
}
// TestNilMap_ChainWithIndex verifies that ChainWithIndex handles nil maps correctly
func TestNilMap_ChainWithIndex(t *testing.T) {
var nilMap Record[string, int]
monoid := MergeMonoid[string, string]()
chain := ChainWithIndex[int, string](monoid)(func(k string, v int) Record[string, string] {
return Record[string, string]{k: fmt.Sprintf("%d", v)}
})
result := chain(nilMap)
assert.NotNil(t, result, "ChainWithIndex should return non-nil map")
assert.Equal(t, 0, len(result), "ChainWithIndex should return empty map for nil input")
}
// TestNilMap_Chain verifies that Chain handles nil maps correctly
func TestNilMap_Chain(t *testing.T) {
var nilMap Record[string, int]
monoid := MergeMonoid[string, string]()
chain := Chain[int, string](monoid)(func(v int) Record[string, string] {
return Record[string, string]{"key": fmt.Sprintf("%d", v)}
})
result := chain(nilMap)
assert.NotNil(t, result, "Chain should return non-nil map")
assert.Equal(t, 0, len(result), "Chain should return empty map for nil input")
}
// TestNilMap_Flatten verifies that Flatten handles nil maps correctly
func TestNilMap_Flatten(t *testing.T) {
var nilMap Record[string, Record[string, int]]
monoid := MergeMonoid[string, int]()
flatten := Flatten[string, int](monoid)
result := flatten(nilMap)
assert.NotNil(t, result, "Flatten should return non-nil map")
assert.Equal(t, 0, len(result), "Flatten should return empty map for nil input")
}
// TestNilMap_Copy verifies that Copy handles nil maps correctly
func TestNilMap_Copy(t *testing.T) {
	// Copying a nil record must yield an empty, non-nil record.
	var src Record[string, int]
	got := Copy(src)
	assert.NotNil(t, got, "Copy should return non-nil map")
	assert.Equal(t, 0, len(got), "Copy should return empty map for nil input")
}
// TestNilMap_Clone verifies that Clone handles nil maps correctly
func TestNilMap_Clone(t *testing.T) {
	// Cloning a nil record (with a value transformer) must yield an empty, non-nil record.
	var src Record[string, int]
	double := func(v int) int { return v * 2 }
	got := Clone[string, int](double)(src)
	assert.NotNil(t, got, "Clone should return non-nil map")
	assert.Equal(t, 0, len(got), "Clone should return empty map for nil input")
}
// TestNilMap_FromArray verifies that FromArray handles nil/empty arrays correctly
func TestNilMap_FromArray(t *testing.T) {
semigroup := SG.Last[int]()
fromArray := FromArray[string, int](semigroup)
// Test with nil slice
var nilSlice Entries[string, int]
result1 := fromArray(nilSlice)
assert.NotNil(t, result1, "FromArray should return non-nil map for nil slice")
assert.Equal(t, 0, len(result1), "FromArray should return empty map for nil slice")
// Test with empty slice
emptySlice := Entries[string, int]{}
result2 := fromArray(emptySlice)
assert.NotNil(t, result2, "FromArray should return non-nil map for empty slice")
assert.Equal(t, 0, len(result2), "FromArray should return empty map for empty slice")
}
// TestNilMap_MonadAp verifies that MonadAp handles nil maps correctly
func TestNilMap_MonadAp(t *testing.T) {
var nilFab Record[string, func(int) string]
var nilFa Record[string, int]
monoid := MergeMonoid[string, string]()
// nil functions, nil values
result1 := MonadAp(monoid, nilFab, nilFa)
assert.NotNil(t, result1, "MonadAp should return non-nil map")
assert.Equal(t, 0, len(result1), "MonadAp should return empty map for nil inputs")
// nil functions, non-nil values
nonNilFa := Record[string, int]{"a": 1}
result2 := MonadAp(monoid, nilFab, nonNilFa)
assert.NotNil(t, result2, "MonadAp should return non-nil map")
assert.Equal(t, 0, len(result2), "MonadAp should return empty map when functions are nil")
// non-nil functions, nil values
nonNilFab := Record[string, func(int) string]{"a": func(v int) string { return fmt.Sprintf("%d", v) }}
result3 := MonadAp(monoid, nonNilFab, nilFa)
assert.NotNil(t, result3, "MonadAp should return non-nil map")
assert.Equal(t, 0, len(result3), "MonadAp should return empty map when values are nil")
}
// TestNilMap_Of verifies that Of creates a proper singleton map
func TestNilMap_Of(t *testing.T) {
	// Of must build a one-entry record holding exactly the given key/value pair.
	got := Of("key", 42)
	assert.NotNil(t, got, "Of should return non-nil map")
	assert.Equal(t, 1, len(got), "Of should create map with one entry")
	assert.Equal(t, 42, got["key"], "Of should set value correctly")
}
// TestNilMap_FromEntries verifies that FromEntries handles nil/empty slices correctly
func TestNilMap_FromEntries(t *testing.T) {
	// nil slice input must yield an empty, non-nil record
	var nilEntries Entries[string, int]
	got := FromEntries(nilEntries)
	assert.NotNil(t, got, "FromEntries should return non-nil map for nil slice")
	assert.Equal(t, 0, len(got), "FromEntries should return empty map for nil slice")

	// empty (but allocated) slice input must also yield an empty, non-nil record
	got = FromEntries(Entries[string, int]{})
	assert.NotNil(t, got, "FromEntries should return non-nil map for empty slice")
	assert.Equal(t, 0, len(got), "FromEntries should return empty map for empty slice")

	// populated slice input must be converted entry-by-entry
	pairs := Entries[string, int]{
		P.MakePair("a", 1),
		P.MakePair("b", 2),
	}
	got = FromEntries(pairs)
	assert.NotNil(t, got, "FromEntries should return non-nil map")
	assert.Equal(t, 2, len(got), "FromEntries should create map with correct size")
	assert.Equal(t, 1, got["a"], "FromEntries should set values correctly")
	assert.Equal(t, 2, got["b"], "FromEntries should set values correctly")
}
// TestNilMap_Singleton verifies that Singleton creates a proper singleton map
func TestNilMap_Singleton(t *testing.T) {
	// Singleton must build a one-entry record holding exactly the given key/value pair.
	got := Singleton("key", 42)
	assert.NotNil(t, got, "Singleton should return non-nil map")
	assert.Equal(t, 1, len(got), "Singleton should create map with one entry")
	assert.Equal(t, 42, got["key"], "Singleton should set value correctly")
}
// TestNilMap_Empty verifies that Empty creates an empty non-nil map
func TestNilMap_Empty(t *testing.T) {
	// Empty must allocate a record: zero entries, but distinct from a nil map.
	got := Empty[string, int]()
	assert.NotNil(t, got, "Empty should return non-nil map")
	assert.Equal(t, 0, len(got), "Empty should return empty map")
	assert.False(t, IsNil(got), "Empty should not return nil map")
}
// TestNilMap_ConstNil verifies that ConstNil returns a nil map
func TestNilMap_ConstNil(t *testing.T) {
	// ConstNil is the counterpart of Empty: it must yield an actual nil map.
	got := ConstNil[string, int]()
	assert.Nil(t, got, "ConstNil should return nil map")
	assert.True(t, IsNil(got), "ConstNil should return nil map")
}
// TestNilMap_FoldMap verifies that FoldMap handles nil maps correctly
func TestNilMap_FoldMap(t *testing.T) {
	// Folding a nil record must yield the monoid's empty element (the empty string).
	var src Record[string, int]
	render := func(v int) string {
		return fmt.Sprintf("%d", v)
	}
	got := FoldMap[string, int, string](S.Monoid)(render)(src)
	assert.Equal(t, "", got, "FoldMap should return empty value for nil map")
}
// TestNilMap_FoldMapWithIndex verifies that FoldMapWithIndex handles nil maps correctly
func TestNilMap_FoldMapWithIndex(t *testing.T) {
	// Folding a nil record with access to keys must yield the monoid's empty element.
	var src Record[string, int]
	render := func(k string, v int) string {
		return fmt.Sprintf("%s=%d", k, v)
	}
	got := FoldMapWithIndex[string, int, string](S.Monoid)(render)(src)
	assert.Equal(t, "", got, "FoldMapWithIndex should return empty value for nil map")
}
// TestNilMap_Fold verifies that Fold handles nil maps correctly
func TestNilMap_Fold(t *testing.T) {
	// Folding a nil record of strings must yield the string monoid's empty element.
	var src Record[string, string]
	got := Fold[string](S.Monoid)(src)
	assert.Equal(t, "", got, "Fold should return empty value for nil map")
}
// TestNilMap_MonadFlap verifies that MonadFlap handles nil maps correctly
func TestNilMap_MonadFlap(t *testing.T) {
	// Flapping a value over a nil record of functions must yield an empty, non-nil record.
	var fns Record[string, func(int) string]
	got := MonadFlap(fns, 42)
	assert.NotNil(t, got, "MonadFlap should return non-nil map")
	assert.Equal(t, 0, len(got), "MonadFlap should return empty map for nil input")
}