Support push model for service discovery
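For orientation: this commit replaces the pull-style Subscriber with a push-style Instancer (which watches the discovery backend and publishes Events) plus an Endpointer (which turns instances into endpoints via a Factory). Below is a minimal sketch of the new wiring, following the Consul example in this diff; the package name, service name, tags, and retry parameters are illustrative assumptions, not part of the commit.

package example

import (
	"time"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/sd"
	consulsd "github.com/go-kit/kit/sd/consul"
	"github.com/go-kit/kit/sd/lb"
)

// buildEndpoint wires the push-model pieces end to end (illustrative helper).
func buildEndpoint(client consulsd.Client, factory sd.Factory, logger log.Logger) endpoint.Endpoint {
	// Instancer: watches Consul and pushes instance Events to subscribers.
	instancer := consulsd.NewInstancer(client, logger, "addsvc", []string{}, true)
	// Endpointer: consumes Events and builds endpoints through the factory.
	endpointer := sd.NewEndpointer(instancer, factory, logger)
	// Balancer and retry: pick one endpoint per call, retry on failure.
	balancer := lb.NewRoundRobin(endpointer)
	return lb.Retry(3, 500*time.Millisecond, balancer) // retry budget is illustrative
}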
.gitignore (vendored) | 5
@@ -43,3 +43,8 @@ Session.vim
 
 # auto-generated tag files
 tags
+
+# dependency management files
+glide.lock
+glide.yaml
+vendor/
@@ -7,7 +7,7 @@
 set -e
 
 function go_files { find . -name '*_test.go' ; }
-function filter { grep -v '/_' ; }
+function filter { grep -v -e '/_' -e vendor ; }
 function remove_relative_prefix { sed -e 's/^\.\///g' ; }
 
 function directories {
@@ -84,18 +84,19 @@ func main() {
 		tags        = []string{}
 		passingOnly = true
 		endpoints   = addsvc.Endpoints{}
+		instancer   = consulsd.NewInstancer(client, logger, "addsvc", tags, passingOnly)
 	)
 	{
 		factory := addsvcFactory(addsvc.MakeSumEndpoint, tracer, logger)
-		subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(*retryMax, *retryTimeout, balancer)
 		endpoints.SumEndpoint = retry
 	}
 	{
 		factory := addsvcFactory(addsvc.MakeConcatEndpoint, tracer, logger)
-		subscriber := consulsd.NewSubscriber(client, factory, logger, "addsvc", tags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(*retryMax, *retryTimeout, balancer)
 		endpoints.ConcatEndpoint = retry
 	}
@@ -120,18 +121,19 @@ func main() {
 		passingOnly = true
 		uppercase   endpoint.Endpoint
 		count       endpoint.Endpoint
+		instancer   = consulsd.NewInstancer(client, logger, "stringsvc", tags, passingOnly)
 	)
 	{
 		factory := stringsvcFactory(ctx, "GET", "/uppercase")
-		subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(*retryMax, *retryTimeout, balancer)
 		uppercase = retry
 	}
 	{
 		factory := stringsvcFactory(ctx, "GET", "/count")
-		subscriber := consulsd.NewSubscriber(client, factory, logger, "stringsvc", tags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(*retryMax, *retryTimeout, balancer)
 		count = retry
 	}
@@ -40,68 +40,91 @@ func New(consulAddr string, logger log.Logger) (profilesvc.Service, error) {
 
 	var (
 		sdclient  = consul.NewClient(apiclient)
+		instancer = consul.NewInstancer(sdclient, logger, consulService, consulTags, passingOnly)
 		endpoints profilesvc.Endpoints
 	)
+	// TODO: thought experiment
+	mapping := []struct {
+		factory  func(s profilesvc.Service) endpoint.Endpoint
+		endpoint *endpoint.Endpoint
+	}{
+		{
+			factory:  profilesvc.MakePostProfileEndpoint,
+			endpoint: &endpoints.PostProfileEndpoint,
+		},
+		{
+			factory:  profilesvc.MakeGetProfileEndpoint,
+			endpoint: &endpoints.GetProfileEndpoint,
+		},
+	}
+	for _, m := range mapping {
+		factory := factoryFor(m.factory)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
+		retry := lb.Retry(retryMax, retryTimeout, balancer)
+		*m.endpoint = retry
+	}
+	// TODO: why not 2 lines per endpoint registration above instead of 7 lines per endpoint below?
 	{
 		factory := factoryFor(profilesvc.MakePostProfileEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.PostProfileEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakeGetProfileEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.GetProfileEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakePutProfileEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.PutProfileEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakePatchProfileEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.PatchProfileEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakeDeleteProfileEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.DeleteProfileEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakeGetAddressesEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.GetAddressesEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakeGetAddressEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.GetAddressEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakePostAddressEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.PostAddressEndpoint = retry
 	}
 	{
 		factory := factoryFor(profilesvc.MakeDeleteAddressEndpoint)
-		subscriber := consul.NewSubscriber(sdclient, factory, logger, consulService, consulTags, passingOnly)
-		balancer := lb.NewRoundRobin(subscriber)
+		endpointer := sd.NewEndpointer(instancer, factory, logger)
+		balancer := lb.NewRoundRobin(endpointer)
 		retry := lb.Retry(retryMax, retryTimeout, balancer)
 		endpoints.DeleteAddressEndpoint = retry
 	}
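One hypothetical answer to the TODO above: hoist the repeated four-line tail into a helper so each registration becomes a single call. The helper below is not part of this commit; its name, and the assumption that retryMax, retryTimeout, and factoryFor are in scope as in the hunk above, are illustrative.

// registerRetry is a hypothetical helper: it bundles the repeated
// factory -> endpointer -> balancer -> retry chain from the blocks above.
func registerRetry(maker func(profilesvc.Service) endpoint.Endpoint, target *endpoint.Endpoint, instancer sd.Instancer, logger log.Logger) {
	factory := factoryFor(maker)
	endpointer := sd.NewEndpointer(instancer, factory, logger)
	balancer := lb.NewRoundRobin(endpointer)
	*target = lb.Retry(retryMax, retryTimeout, balancer)
}

// Usage, one line per endpoint:
//   registerRetry(profilesvc.MakePostProfileEndpoint, &endpoints.PostProfileEndpoint, instancer, logger)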
@@ -40,7 +40,7 @@ func proxyingMiddleware(ctx context.Context, instances string, logger log.Logger
 	// discovery system.
 	var (
 		instanceList = split(instances)
-		subscriber   sd.FixedSubscriber
+		endpointer   sd.FixedEndpointer
 	)
 	logger.Log("proxy_to", fmt.Sprint(instanceList))
 	for _, instance := range instanceList {
@@ -48,12 +48,12 @@ func proxyingMiddleware(ctx context.Context, instances string, logger log.Logger
 		e = makeUppercaseProxy(ctx, instance)
 		e = circuitbreaker.Gobreaker(gobreaker.NewCircuitBreaker(gobreaker.Settings{}))(e)
 		e = ratelimit.NewTokenBucketLimiter(jujuratelimit.NewBucketWithRate(float64(qps), int64(qps)))(e)
-		subscriber = append(subscriber, e)
+		endpointer = append(endpointer, e)
 	}
 
 	// Now, build a single, retrying, load-balancing endpoint out of all of
 	// those individual endpoints.
-	balancer := lb.NewRoundRobin(subscriber)
+	balancer := lb.NewRoundRobin(endpointer)
 	retry := lb.Retry(maxAttempts, maxTime, balancer)
 
 	// And finally, return the ServiceMiddleware, implemented by proxymw.
@@ -1,4 +1,4 @@
-package cache
+package sd
 
 import (
 	"io"
@@ -14,12 +14,12 @@ func BenchmarkEndpoints(b *testing.B) {
 		cb      = make(closer)
 		cmap    = map[string]io.Closer{"a": ca, "b": cb}
 		factory = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, cmap[instance], nil }
-		c       = New(factory, log.NewNopLogger())
+		c       = newEndpointCache(factory, log.NewNopLogger(), endpointerOptions{})
 	)
 
 	b.ReportAllocs()
 
-	c.Update([]string{"a", "b"})
+	c.Update(Event{Instances: []string{"a", "b"}})
 
 	b.RunParallel(func(pb *testing.PB) {
 		for pb.Next() {
sd/cache.go (new file) | 136
@@ -0,0 +1,136 @@
+package sd
+
+import (
+	"io"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/endpoint"
+	"github.com/go-kit/kit/log"
+)
+
+// endpointCache collects the most recent set of instances from a service discovery
+// system, creates endpoints for them using a factory function, and makes
+// them available to consumers.
+type endpointCache struct {
+	options            endpointerOptions
+	mtx                sync.RWMutex
+	factory            Factory
+	cache              map[string]endpointCloser
+	err                error
+	endpoints          []endpoint.Endpoint
+	logger             log.Logger
+	invalidateDeadline time.Time
+}
+
+type endpointCloser struct {
+	endpoint.Endpoint
+	io.Closer
+}
+
+// newEndpointCache returns a new, empty endpointCache.
+func newEndpointCache(factory Factory, logger log.Logger, options endpointerOptions) *endpointCache {
+	return &endpointCache{
+		options: options,
+		factory: factory,
+		cache:   map[string]endpointCloser{},
+		logger:  logger,
+	}
+}
+
+// Update should be invoked by clients with a complete set of current instance
+// strings whenever that set changes. The cache manufactures new endpoints via
+// the factory, closes old endpoints when they disappear, and persists existing
+// endpoints if they survive through an update.
+func (c *endpointCache) Update(event Event) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	if event.Err == nil {
+		c.updateCache(event.Instances)
+		c.invalidateDeadline = time.Time{}
+		c.err = nil
+		return
+	}
+
+	c.logger.Log("err", event.Err)
+
+	if c.options.invalidateOnErrorTimeout == nil {
+		// keep returning the last known endpoints on error
+		return
+	}
+
+	c.err = event.Err
+
+	if !c.invalidateDeadline.IsZero() {
+		// already in the error state, do nothing
+		return
+	}
+	// set new deadline to invalidate Endpoints unless a non-error Event is received
+	c.invalidateDeadline = time.Now().Add(*c.options.invalidateOnErrorTimeout)
+	return
+}
+
+func (c *endpointCache) updateCache(instances []string) {
+	// Deterministic order (for later).
+	sort.Strings(instances)
+
+	// Produce the current set of services.
+	cache := make(map[string]endpointCloser, len(instances))
+	for _, instance := range instances {
+		// If it already exists, just copy it over.
+		if sc, ok := c.cache[instance]; ok {
+			cache[instance] = sc
+			delete(c.cache, instance)
+			continue
+		}
+
+		// If it doesn't exist, create it.
+		service, closer, err := c.factory(instance)
+		if err != nil {
+			c.logger.Log("instance", instance, "err", err)
+			continue
+		}
+		cache[instance] = endpointCloser{service, closer}
+	}
+
+	// Close any leftover endpoints.
+	for _, sc := range c.cache {
+		if sc.Closer != nil {
+			sc.Closer.Close()
+		}
+	}
+
+	// Populate the slice of endpoints.
+	endpoints := make([]endpoint.Endpoint, 0, len(cache))
+	for _, instance := range instances {
+		// A bad factory may mean an instance is not present.
+		if _, ok := cache[instance]; !ok {
+			continue
+		}
+		endpoints = append(endpoints, cache[instance].Endpoint)
+	}
+
+	// Swap and trigger GC for old copies.
+	c.endpoints = endpoints
+	c.cache = cache
+}
+
+// Endpoints yields the current set of (presumably identical) endpoints, ordered
+// lexicographically by the corresponding instance string.
+func (c *endpointCache) Endpoints() ([]endpoint.Endpoint, error) {
+	c.mtx.RLock()
+
+	if c.err == nil || time.Now().Before(c.invalidateDeadline) {
+		defer c.mtx.RUnlock()
+		return c.endpoints, nil
+	}
+
+	c.mtx.RUnlock()
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	c.updateCache(nil) // close any remaining active endpoints
+
+	return nil, c.err
+}
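To make the cache's error semantics concrete, here is a small in-package sketch against the code above. The instance strings and the error are made up; with an empty endpointerOptions there is no invalidation timeout, so the last known endpoints keep being served through an outage.

package sd

import (
	"errors"
	"io"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/log"
)

// exampleCacheSemantics is an illustrative sketch: error Events do not drop
// previously built endpoints unless invalidateOnErrorTimeout is configured.
func exampleCacheSemantics(logger log.Logger) {
	factory := func(string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, nil, nil }

	c := newEndpointCache(factory, logger, endpointerOptions{})
	c.Update(Event{Instances: []string{"a:123", "b:456"}}) // builds and caches two endpoints
	c.Update(Event{Err: errors.New("sd backend down")})    // endpoints retained, error not surfaced

	endpoints, err := c.Endpoints() // err == nil, len(endpoints) == 2
	_, _ = endpoints, err
}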
sd/cache/cache.go (deleted, vendored) | 96
@@ -1,96 +0,0 @@
-package cache
-
-import (
-	"io"
-	"sort"
-	"sync"
-	"sync/atomic"
-
-	"github.com/go-kit/kit/endpoint"
-	"github.com/go-kit/kit/log"
-	"github.com/go-kit/kit/sd"
-)
-
-// Cache collects the most recent set of endpoints from a service discovery
-// system via a subscriber, and makes them available to consumers. Cache is
-// meant to be embedded inside of a concrete subscriber, and can serve Service
-// invocations directly.
-type Cache struct {
-	mtx     sync.RWMutex
-	factory sd.Factory
-	cache   map[string]endpointCloser
-	slice   atomic.Value // []endpoint.Endpoint
-	logger  log.Logger
-}
-
-type endpointCloser struct {
-	endpoint.Endpoint
-	io.Closer
-}
-
-// New returns a new, empty endpoint cache.
-func New(factory sd.Factory, logger log.Logger) *Cache {
-	return &Cache{
-		factory: factory,
-		cache:   map[string]endpointCloser{},
-		logger:  logger,
-	}
-}
-
-// Update should be invoked by clients with a complete set of current instance
-// strings whenever that set changes. The cache manufactures new endpoints via
-// the factory, closes old endpoints when they disappear, and persists existing
-// endpoints if they survive through an update.
-func (c *Cache) Update(instances []string) {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-
-	// Deterministic order (for later).
-	sort.Strings(instances)
-
-	// Produce the current set of services.
-	cache := make(map[string]endpointCloser, len(instances))
-	for _, instance := range instances {
-		// If it already exists, just copy it over.
-		if sc, ok := c.cache[instance]; ok {
-			cache[instance] = sc
-			delete(c.cache, instance)
-			continue
-		}
-
-		// If it doesn't exist, create it.
-		service, closer, err := c.factory(instance)
-		if err != nil {
-			c.logger.Log("instance", instance, "err", err)
-			continue
-		}
-		cache[instance] = endpointCloser{service, closer}
-	}
-
-	// Close any leftover endpoints.
-	for _, sc := range c.cache {
-		if sc.Closer != nil {
-			sc.Closer.Close()
-		}
-	}
-
-	// Populate the slice of endpoints.
-	slice := make([]endpoint.Endpoint, 0, len(cache))
-	for _, instance := range instances {
-		// A bad factory may mean an instance is not present.
-		if _, ok := cache[instance]; !ok {
-			continue
-		}
-		slice = append(slice, cache[instance].Endpoint)
-	}
-
-	// Swap and trigger GC for old copies.
-	c.slice.Store(slice)
-	c.cache = cache
-}
-
-// Endpoints yields the current set of (presumably identical) endpoints, ordered
-// lexicographically by the corresponding instance string.
-func (c *Cache) Endpoints() []endpoint.Endpoint {
-	return c.slice.Load().([]endpoint.Endpoint)
-}
@@ -1,4 +1,4 @@
-package cache
+package sd
 
 import (
 	"errors"
@@ -16,11 +16,11 @@ func TestCache(t *testing.T) {
 		cb    = make(closer)
 		c     = map[string]io.Closer{"a": ca, "b": cb}
 		f     = func(instance string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, c[instance], nil }
-		cache = New(f, log.NewNopLogger())
+		cache = newEndpointCache(f, log.NewNopLogger(), endpointerOptions{})
 	)
 
 	// Populate
-	cache.Update([]string{"a", "b"})
+	cache.Update(Event{Instances: []string{"a", "b"}})
 	select {
 	case <-ca:
 		t.Errorf("endpoint a closed, not good")
@@ -29,12 +29,10 @@ func TestCache(t *testing.T) {
 	case <-time.After(time.Millisecond):
 		t.Logf("no closures yet, good")
 	}
-	if want, have := 2, len(cache.Endpoints()); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
+	assertEndpointsLen(t, cache, 2)
 
 	// Duplicate, should be no-op
-	cache.Update([]string{"a", "b"})
+	cache.Update(Event{Instances: []string{"a", "b"}})
 	select {
 	case <-ca:
 		t.Errorf("endpoint a closed, not good")
@@ -43,12 +41,10 @@ func TestCache(t *testing.T) {
 	case <-time.After(time.Millisecond):
 		t.Logf("no closures yet, good")
 	}
-	if want, have := 2, len(cache.Endpoints()); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
+	assertEndpointsLen(t, cache, 2)
 
 	// Delete b
-	go cache.Update([]string{"a"})
+	go cache.Update(Event{Instances: []string{"a"}})
 	select {
 	case <-ca:
 		t.Errorf("endpoint a closed, not good")
@@ -57,12 +53,10 @@ func TestCache(t *testing.T) {
 	case <-time.After(time.Second):
 		t.Errorf("didn't close the deleted instance in time")
 	}
-	if want, have := 1, len(cache.Endpoints()); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
+	assertEndpointsLen(t, cache, 1)
 
 	// Delete a
-	go cache.Update([]string{})
+	go cache.Update(Event{Instances: []string{}})
 	select {
 	// case <-cb: will succeed, as it's closed
 	case <-ca:
@@ -70,18 +64,25 @@ func TestCache(t *testing.T) {
 	case <-time.After(time.Second):
 		t.Errorf("didn't close the deleted instance in time")
 	}
-	if want, have := 0, len(cache.Endpoints()); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
+	assertEndpointsLen(t, cache, 0)
 }
 
 func TestBadFactory(t *testing.T) {
-	cache := New(func(string) (endpoint.Endpoint, io.Closer, error) {
+	cache := newEndpointCache(func(string) (endpoint.Endpoint, io.Closer, error) {
 		return nil, nil, errors.New("bad factory")
-	}, log.NewNopLogger())
+	}, log.NewNopLogger(), endpointerOptions{})
 
-	cache.Update([]string{"foo:1234", "bar:5678"})
-	if want, have := 0, len(cache.Endpoints()); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
+	cache.Update(Event{Instances: []string{"foo:1234", "bar:5678"}})
+	assertEndpointsLen(t, cache, 0)
 }
+
+func assertEndpointsLen(t *testing.T, cache *endpointCache, l int) {
+	endpoints, err := cache.Endpoints()
+	if err != nil {
+		t.Errorf("unexpected error %v", err)
+		return
+	}
+	if want, have := l, len(endpoints); want != have {
+		t.Errorf("want %d, have %d", want, have)
+	}
+}
@@ -1,2 +1,2 @@
-// Package consul provides subscriber and registrar implementations for Consul.
+// Package consul provides Instancer and Registrar implementations for Consul.
 package consul
@@ -6,35 +6,30 @@ import (
 
 	consul "github.com/hashicorp/consul/api"
 
-	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/sd"
-	"github.com/go-kit/kit/sd/cache"
+	"github.com/go-kit/kit/sd/internal/instance"
 )
 
 const defaultIndex = 0
 
-// Subscriber yields endpoints for a service in Consul. Updates to the service
-// are watched and will update the Subscriber endpoints.
-type Subscriber struct {
-	cache       *cache.Cache
+// Instancer yields instances for a service in Consul.
+type Instancer struct {
+	instance.Cache
 	client      Client
 	logger      log.Logger
 	service     string
 	tags        []string
 	passingOnly bool
-	endpointsc  chan []endpoint.Endpoint
 	quitc       chan struct{}
 }
 
-var _ sd.Subscriber = &Subscriber{}
-
-// NewSubscriber returns a Consul subscriber which returns endpoints for the
+// NewInstancer returns a Consul instancer that publishes instances for the
 // requested service. It only returns instances for which all of the passed tags
 // are present.
-func NewSubscriber(client Client, factory sd.Factory, logger log.Logger, service string, tags []string, passingOnly bool) *Subscriber {
-	s := &Subscriber{
-		cache:       cache.New(factory, logger),
+func NewInstancer(client Client, logger log.Logger, service string, tags []string, passingOnly bool) *Instancer {
+	s := &Instancer{
+		Cache:       *instance.NewCache(),
 		client:      client,
 		logger:      log.With(logger, "service", service, "tags", fmt.Sprint(tags)),
 		service:     service,
@@ -50,22 +45,17 @@ func NewSubscriber(client Client, factory sd.Factory, logger log.Logger, service
 		s.logger.Log("err", err)
 	}
 
-	s.cache.Update(instances)
+	s.Update(sd.Event{Instances: instances, Err: err})
 	go s.loop(index)
 	return s
 }
 
-// Endpoints implements the Subscriber interface.
-func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
-	return s.cache.Endpoints(), nil
-}
-
-// Stop terminates the subscriber.
-func (s *Subscriber) Stop() {
+// Stop terminates the instancer.
+func (s *Instancer) Stop() {
 	close(s.quitc)
 }
 
-func (s *Subscriber) loop(lastIndex uint64) {
+func (s *Instancer) loop(lastIndex uint64) {
 	var (
 		instances []string
 		err       error
@@ -77,13 +67,14 @@ func (s *Subscriber) loop(lastIndex uint64) {
 			return // stopped via quitc
 		case err != nil:
 			s.logger.Log("err", err)
+			s.Update(sd.Event{Err: err})
 		default:
-			s.cache.Update(instances)
+			s.Update(sd.Event{Instances: instances})
 		}
 	}
 }
 
-func (s *Subscriber) getInstances(lastIndex uint64, interruptc chan struct{}) ([]string, uint64, error) {
+func (s *Instancer) getInstances(lastIndex uint64, interruptc chan struct{}) ([]string, uint64, error) {
 	tag := ""
 	if len(s.tags) > 0 {
 		tag = s.tags[0]
@@ -7,8 +7,11 @@ import (
 	consul "github.com/hashicorp/consul/api"
 
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 )
 
+var _ sd.Instancer = &Instancer{} // API check
+
 var consulState = []*consul.ServiceEntry{
 	{
 		Node: &consul.Node{
@@ -57,77 +60,69 @@ var consulState = []*consul.ServiceEntry{
 	},
 }
 
-func TestSubscriber(t *testing.T) {
+func TestInstancer(t *testing.T) {
 	var (
 		logger = log.NewNopLogger()
 		client = newTestClient(consulState)
 	)
 
-	s := NewSubscriber(client, testFactory, logger, "search", []string{"api"}, true)
+	s := NewInstancer(client, logger, "search", []string{"api"}, true)
 	defer s.Stop()
 
-	endpoints, err := s.Endpoints()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, have := 2, len(endpoints); want != have {
+	state := s.State()
+	if want, have := 2, len(state.Instances); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
 }
 
-func TestSubscriberNoService(t *testing.T) {
+func TestInstancerNoService(t *testing.T) {
 	var (
 		logger = log.NewNopLogger()
 		client = newTestClient(consulState)
 	)
 
-	s := NewSubscriber(client, testFactory, logger, "feed", []string{}, true)
+	s := NewInstancer(client, logger, "feed", []string{}, true)
 	defer s.Stop()
 
-	endpoints, err := s.Endpoints()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, have := 0, len(endpoints); want != have {
+	state := s.State()
+	if want, have := 0, len(state.Instances); want != have {
 		t.Fatalf("want %d, have %d", want, have)
 	}
 }
 
-func TestSubscriberWithTags(t *testing.T) {
+func TestInstancerWithTags(t *testing.T) {
 	var (
 		logger = log.NewNopLogger()
 		client = newTestClient(consulState)
 	)
 
-	s := NewSubscriber(client, testFactory, logger, "search", []string{"api", "v2"}, true)
+	s := NewInstancer(client, logger, "search", []string{"api", "v2"}, true)
 	defer s.Stop()
 
-	endpoints, err := s.Endpoints()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, have := 1, len(endpoints); want != have {
+	state := s.State()
+	if want, have := 1, len(state.Instances); want != have {
 		t.Fatalf("want %d, have %d", want, have)
 	}
 }
 
-func TestSubscriberAddressOverride(t *testing.T) {
-	s := NewSubscriber(newTestClient(consulState), testFactory, log.NewNopLogger(), "search", []string{"db"}, true)
+func TestInstancerAddressOverride(t *testing.T) {
+	s := NewInstancer(newTestClient(consulState), log.NewNopLogger(), "search", []string{"db"}, true)
 	defer s.Stop()
 
-	endpoints, err := s.Endpoints()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, have := 1, len(endpoints); want != have {
+	state := s.State()
+	if want, have := 1, len(state.Instances); want != have {
 		t.Fatalf("want %d, have %d", want, have)
 	}
 
-	response, err := endpoints[0](context.Background(), struct{}{})
+	endpoint, closer, err := testFactory(state.Instances[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	if closer != nil {
+		defer closer.Close()
+	}
+
+	response, err := endpoint(context.Background(), struct{}{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -10,6 +10,7 @@ import (
 
 	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 	stdconsul "github.com/hashicorp/consul/api"
 )
 
@@ -38,24 +39,28 @@ func TestIntegration(t *testing.T) {
 		// skipping check(s)
 	}
 
-	// Build a subscriber on r.Name + r.Tags.
+	// Build an Instancer on r.Name + r.Tags.
 	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
 		t.Logf("factory invoked for %q", instance)
 		return endpoint.Nop, nil, nil
 	}
-	subscriber := NewSubscriber(
+	instancer := NewInstancer(
 		client,
-		factory,
-		log.With(logger, "component", "subscriber"),
+		log.With(logger, "component", "instancer"),
 		r.Name,
 		r.Tags,
 		true,
 	)
+	endpointer := sd.NewEndpointer(
+		instancer,
+		factory,
+		log.With(logger, "component", "endpointer"),
+	)
 
 	time.Sleep(time.Second)
 
 	// Before we publish, we should have no endpoints.
-	endpoints, err := subscriber.Endpoints()
+	endpoints, err := endpointer.Endpoints()
 	if err != nil {
 		t.Error(err)
 	}
@@ -71,7 +76,7 @@ func TestIntegration(t *testing.T) {
 	time.Sleep(time.Second)
 
 	// Now we should have one active endpoint.
-	endpoints, err = subscriber.Endpoints()
+	endpoints, err = endpointer.Endpoints()
 	if err != nil {
 		t.Error(err)
 	}
@@ -1,2 +1,2 @@
-// Package dnssrv provides a subscriber implementation for DNS SRV records.
+// Package dnssrv provides an Instancer implementation for DNS SRV records.
 package dnssrv
@@ -5,44 +5,41 @@ import (
 	"net"
 	"time"
 
-	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/sd"
-	"github.com/go-kit/kit/sd/cache"
+	"github.com/go-kit/kit/sd/internal/instance"
 )
 
-// Subscriber yields endpoints taken from the named DNS SRV record. The name is
+// Instancer yields instances from the named DNS SRV record. The name is
 // resolved on a fixed schedule. Priorities and weights are ignored.
-type Subscriber struct {
+type Instancer struct {
+	instance.Cache
 	name   string
-	cache  *cache.Cache
 	logger log.Logger
 	quit   chan struct{}
 }
 
-// NewSubscriber returns a DNS SRV subscriber.
-func NewSubscriber(
+// NewInstancer returns a DNS SRV instancer.
+func NewInstancer(
 	name string,
 	ttl time.Duration,
-	factory sd.Factory,
 	logger log.Logger,
-) *Subscriber {
-	return NewSubscriberDetailed(name, time.NewTicker(ttl), net.LookupSRV, factory, logger)
+) *Instancer {
+	return NewInstancerDetailed(name, time.NewTicker(ttl), net.LookupSRV, logger)
 }
 
-// NewSubscriberDetailed is the same as NewSubscriber, but allows users to
+// NewInstancerDetailed is the same as NewInstancer, but allows users to
 // provide an explicit lookup refresh ticker instead of a TTL, and specify the
 // lookup function instead of using net.LookupSRV.
-func NewSubscriberDetailed(
+func NewInstancerDetailed(
 	name string,
 	refresh *time.Ticker,
 	lookup Lookup,
-	factory sd.Factory,
 	logger log.Logger,
-) *Subscriber {
-	p := &Subscriber{
+) *Instancer {
+	p := &Instancer{
+		Cache:  *instance.NewCache(),
 		name:   name,
-		cache:  cache.New(factory, logger),
 		logger: logger,
 		quit:   make(chan struct{}),
 	}
@@ -53,18 +50,18 @@ func NewSubscriberDetailed(
 	} else {
 		logger.Log("name", name, "err", err)
 	}
-	p.cache.Update(instances)
+	p.Update(sd.Event{Instances: instances, Err: err})
 
 	go p.loop(refresh, lookup)
 	return p
 }
 
-// Stop terminates the Subscriber.
-func (p *Subscriber) Stop() {
+// Stop terminates the Instancer.
+func (p *Instancer) Stop() {
 	close(p.quit)
 }
 
-func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) {
+func (p *Instancer) loop(t *time.Ticker, lookup Lookup) {
 	defer t.Stop()
 	for {
 		select {
@@ -72,9 +69,10 @@ func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) {
 			instances, err := p.resolve(lookup)
 			if err != nil {
 				p.logger.Log("name", p.name, "err", err)
+				p.Update(sd.Event{Err: err})
 				continue // don't replace potentially-good with bad
 			}
-			p.cache.Update(instances)
+			p.Update(sd.Event{Instances: instances})
 
 		case <-p.quit:
 			return
@@ -82,15 +80,10 @@ func (p *Subscriber) loop(t *time.Ticker, lookup Lookup) {
 	}
 }
 
-// Endpoints implements the Subscriber interface.
-func (p *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
-	return p.cache.Endpoints(), nil
-}
-
-func (p *Subscriber) resolve(lookup Lookup) ([]string, error) {
+func (p *Instancer) resolve(lookup Lookup) ([]string, error) {
 	_, addrs, err := lookup("", "", p.name)
 	if err != nil {
-		return []string{}, err
+		return nil, err
 	}
 	instances := make([]string, len(addrs))
 	for i, addr := range addrs {
@@ -1,16 +1,17 @@
 package dnssrv
 
 import (
-	"io"
 	"net"
 	"sync/atomic"
 	"testing"
 	"time"
 
-	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 )
 
+var _ sd.Instancer = &Instancer{} // API check
+
 func TestRefresh(t *testing.T) {
 	name := "some.service.internal"
 
@@ -27,30 +28,20 @@ func TestRefresh(t *testing.T) {
 		return "cname", records, nil
 	}
 
-	var generates uint64
-	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
-		t.Logf("factory(%q)", instance)
-		atomic.AddUint64(&generates, 1)
-		return endpoint.Nop, nopCloser{}, nil
-	}
-
-	subscriber := NewSubscriberDetailed(name, ticker, lookup, factory, log.NewNopLogger())
-	defer subscriber.Stop()
+	instancer := NewInstancerDetailed(name, ticker, lookup, log.NewNopLogger())
+	defer instancer.Stop()
 
 	// First lookup, empty
-	endpoints, err := subscriber.Endpoints()
-	if err != nil {
-		t.Error(err)
+	state := instancer.State()
+	if state.Err != nil {
+		t.Error(state.Err)
 	}
-	if want, have := 0, len(endpoints); want != have {
+	if want, have := 0, len(state.Instances); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
 	if want, have := uint64(1), atomic.LoadUint64(&lookups); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
-	if want, have := uint64(0), atomic.LoadUint64(&generates); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
 
 	// Load some records and lookup again
 	records = []*net.SRV{
@@ -60,24 +51,21 @@ func TestRefresh(t *testing.T) {
 	}
 	tickc <- time.Now()
 
-	// There is a race condition where the subscriber.Endpoints call below
+	// There is a race condition where the instancer.State call below
 	// invokes the cache before it is updated by the tick above.
 	// TODO(pb): solve by running the read through the loop goroutine.
 	time.Sleep(100 * time.Millisecond)
 
-	endpoints, err = subscriber.Endpoints()
-	if err != nil {
-		t.Error(err)
+	state = instancer.State()
+	if state.Err != nil {
+		t.Error(state.Err)
 	}
-	if want, have := 3, len(endpoints); want != have {
+	if want, have := 3, len(state.Instances); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
 	if want, have := uint64(2), atomic.LoadUint64(&lookups); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
-	if want, have := uint64(len(records)), atomic.LoadUint64(&generates); want != have {
-		t.Errorf("want %d, have %d", want, have)
-	}
 }
 
 type nopCloser struct{}
sd/endpointer.go (new file) | 79
@@ -0,0 +1,79 @@
+package sd
+
+import (
+	"time"
+
+	"github.com/go-kit/kit/endpoint"
+	"github.com/go-kit/kit/log"
+)
+
+// Endpointer listens to a service discovery system and yields a set of
+// identical endpoints on demand. An error indicates a problem with connectivity
+// to the service discovery system, or within the system itself; an Endpointer
+// may yield no endpoints without error.
+type Endpointer interface {
+	Endpoints() ([]endpoint.Endpoint, error)
+}
+
+// FixedEndpointer yields a fixed set of endpoints.
+type FixedEndpointer []endpoint.Endpoint
+
+// Endpoints implements Endpointer.
+func (s FixedEndpointer) Endpoints() ([]endpoint.Endpoint, error) { return s, nil }
+
+// NewEndpointer creates an Endpointer that subscribes to updates from Instancer src
+// and uses factory f to create Endpoints. If src notifies of an error, the Endpointer
+// keeps returning previously created Endpoints assuming they are still good, unless
+// this behavior is changed with the InvalidateOnError option.
+func NewEndpointer(src Instancer, f Factory, logger log.Logger, options ...EndpointerOption) Endpointer {
+	opts := endpointerOptions{}
+	for _, opt := range options {
+		opt(&opts)
+	}
+	se := &simpleEndpointer{
+		endpointCache: *newEndpointCache(f, logger, opts),
+		instancer:     src,
+		ch:            make(chan Event),
+	}
+	go se.receive()
+	src.Register(se.ch)
+	return se
+}
+
+// EndpointerOption allows control of endpointCache behavior.
+type EndpointerOption func(*endpointerOptions)
+
+// InvalidateOnError returns an EndpointerOption that controls how the Endpointer
+// behaves when the Instancer publishes an Event containing an error.
+// Without this option the Endpointer continues returning the last known
+// endpoints. With this option, the Endpointer continues returning the last
+// known endpoints until the timeout elapses, then closes all active endpoints
+// and starts returning an error. Once the Instancer sends a new update with
+// valid resource instances, normal operation is resumed.
+func InvalidateOnError(timeout time.Duration) EndpointerOption {
+	return func(opts *endpointerOptions) {
+		opts.invalidateOnErrorTimeout = &timeout
+	}
+}
+
+type endpointerOptions struct {
+	invalidateOnErrorTimeout *time.Duration
+}
+
+type simpleEndpointer struct {
+	endpointCache
+
+	instancer Instancer
+	ch        chan Event
+}
+
+func (se *simpleEndpointer) receive() {
+	for event := range se.ch {
+		se.Update(event)
+	}
+}
+
+func (se *simpleEndpointer) Close() {
+	se.instancer.Deregister(se.ch)
+	close(se.ch)
+}
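A usage sketch for the option above, written from a client package. The instancer, factory, and logger are assumed to come from one of the sd backends in this diff, and the 30-second timeout is illustrative.

// Keep serving the last known endpoints for up to 30s of discovery errors,
// then invalidate them and surface the error from Endpoints().
endpointer := sd.NewEndpointer(
	instancer,
	factory,
	logger,
	sd.InvalidateOnError(30*time.Second),
)

endpoints, err := endpointer.Endpoints()
if err != nil {
	// discovery has been failing for longer than the configured timeout
}
_ = endpoints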
@@ -1,4 +1,4 @@
-// Package etcd provides a Subscriber and Registrar implementation for etcd. If
+// Package etcd provides an Instancer and Registrar implementation for etcd. If
 // you use etcd as your service discovery system, this package will help you
 // implement the registration and client-side load balancing patterns.
 package etcd
@@ -7,6 +7,7 @@ import (
 
 	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 	"github.com/go-kit/kit/sd/lb"
 )
 
@@ -44,16 +45,18 @@ func Example() {
 	defer registrar.Deregister()
 
 	// It's likely that we'll also want to connect to other services and call
-	// their methods. We can build a subscriber to listen for changes from etcd
-	// and build endpoints, wrap it with a load-balancer to pick a single
+	// their methods. We can build an Instancer to listen for changes from etcd,
+	// create an Endpointer, wrap it with a load-balancer to pick a single
 	// endpoint, and finally wrap it with a retry strategy to get something that
 	// can be used as an endpoint directly.
 	barPrefix := "/services/barsvc"
-	subscriber, err := NewSubscriber(client, barPrefix, barFactory, log.NewNopLogger())
+	logger := log.NewNopLogger()
+	instancer, err := NewInstancer(client, barPrefix, logger)
 	if err != nil {
 		panic(err)
 	}
-	balancer := lb.NewRoundRobin(subscriber)
+	endpointer := sd.NewEndpointer(instancer, barFactory, logger)
+	balancer := lb.NewRoundRobin(endpointer)
 	retry := lb.Retry(3, 3*time.Second, balancer)
 
 	// And now retry can be used like any other endpoint.
sd/etcd/instancer.go (new file) | 65
@@ -0,0 +1,65 @@
+package etcd
+
+import (
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
+	"github.com/go-kit/kit/sd/internal/instance"
+)
+
+// Instancer yields instances stored in a certain etcd keyspace. Any kind of
+// change in that keyspace is watched and will update the Instancer's subscribers.
+type Instancer struct {
+	instance.Cache
+	client Client
+	prefix string
+	logger log.Logger
+	quitc  chan struct{}
+}
+
+// NewInstancer returns an etcd instancer. It will start watching the given
+// prefix for changes, and update the subscribers.
+func NewInstancer(c Client, prefix string, logger log.Logger) (*Instancer, error) {
+	s := &Instancer{
+		client: c,
+		prefix: prefix,
+		Cache:  *instance.NewCache(),
+		logger: logger,
+		quitc:  make(chan struct{}),
+	}
+
+	instances, err := s.client.GetEntries(s.prefix)
+	if err == nil {
+		logger.Log("prefix", s.prefix, "instances", len(instances))
+	} else {
+		logger.Log("prefix", s.prefix, "err", err)
+	}
+	s.Update(sd.Event{Instances: instances, Err: err})
+
+	go s.loop()
+	return s, nil
+}
+
+func (s *Instancer) loop() {
+	ch := make(chan struct{})
+	go s.client.WatchPrefix(s.prefix, ch)
+	for {
+		select {
+		case <-ch:
+			instances, err := s.client.GetEntries(s.prefix)
+			if err != nil {
+				s.logger.Log("msg", "failed to retrieve entries", "err", err)
+				s.Update(sd.Event{Err: err})
+				continue
+			}
+			s.Update(sd.Event{Instances: instances})
+
+		case <-s.quitc:
+			return
+		}
+	}
+}
+
+// Stop terminates the Instancer.
+func (s *Instancer) Stop() {
+	close(s.quitc)
+}
@@ -2,13 +2,12 @@ package etcd
 
 import (
 	"errors"
-	"io"
 	"testing"
 
 	stdetcd "github.com/coreos/etcd/client"
 
-	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 )
 
 var (
@@ -24,48 +23,21 @@ var (
 	}
 )
 
-func TestSubscriber(t *testing.T) {
-	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
-		return endpoint.Nop, nil, nil
-	}
+var _ sd.Instancer = &Instancer{} // API check
 
+func TestInstancer(t *testing.T) {
 	client := &fakeClient{
 		responses: map[string]*stdetcd.Response{"/foo": fakeResponse},
 	}
 
-	s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger())
+	s, err := NewInstancer(client, "/foo", log.NewNopLogger())
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer s.Stop()
 
-	if _, err := s.Endpoints(); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestBadFactory(t *testing.T) {
-	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
-		return nil, nil, errors.New("kaboom")
-	}
-
-	client := &fakeClient{
-		responses: map[string]*stdetcd.Response{"/foo": fakeResponse},
-	}
-
-	s, err := NewSubscriber(client, "/foo", factory, log.NewNopLogger())
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer s.Stop()
-
-	endpoints, err := s.Endpoints()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if want, have := 0, len(endpoints); want != have {
-		t.Errorf("want %d, have %d", want, have)
+	if state := s.State(); state.Err != nil {
+		t.Fatal(state.Err)
 	}
 }
@@ -11,6 +11,7 @@ import (
 
 	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/sd"
 )
 
 // Package sd/etcd provides a wrapper around the etcd key/value store. This
@@ -67,24 +68,28 @@ func TestIntegration(t *testing.T) {
 		t.Fatalf("want %q, have %q", want, have)
 	}
 
-	subscriber, err := NewSubscriber(
+	instancer, err := NewInstancer(
 		client,
 		prefix,
-		func(string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, nil, nil },
-		log.With(log.NewLogfmtLogger(os.Stderr), "component", "subscriber"),
+		log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
 	)
 	if err != nil {
-		t.Fatalf("NewSubscriber: %v", err)
+		t.Fatalf("NewInstancer: %v", err)
 	}
-	t.Logf("Constructed Subscriber OK")
+	endpointer := sd.NewEndpointer(
+		instancer,
+		func(string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, nil, nil },
+		log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
+	)
+	t.Logf("Constructed Endpointer OK")
 
 	if !within(time.Second, func() bool {
-		endpoints, err := subscriber.Endpoints()
+		endpoints, err := endpointer.Endpoints()
 		return err == nil && len(endpoints) == 1
 	}) {
-		t.Fatalf("Subscriber didn't see Register in time")
+		t.Fatalf("Endpointer didn't see Register in time")
 	}
-	t.Logf("Subscriber saw Register OK")
+	t.Logf("Endpointer saw Register OK")
 
 	// Deregister first instance of test data.
 	registrar.Deregister()
@@ -92,11 +97,11 @@ func TestIntegration(t *testing.T) {
 
 	// Check it was deregistered.
 	if !within(time.Second, func() bool {
-		endpoints, err := subscriber.Endpoints()
+		endpoints, err := endpointer.Endpoints()
 		t.Logf("Checking Deregister: len(endpoints) = %d, err = %v", len(endpoints), err)
 		return err == nil && len(endpoints) == 0
 	}) {
-		t.Fatalf("Subscriber didn't see Deregister in time")
+		t.Fatalf("Endpointer didn't see Deregister in time")
 	}
 
 	// Verify test data no longer exists in etcd.
@@ -1,72 +0,0 @@
-package etcd
-
-import (
-	"github.com/go-kit/kit/endpoint"
-	"github.com/go-kit/kit/log"
-	"github.com/go-kit/kit/sd"
-	"github.com/go-kit/kit/sd/cache"
-)
-
-// Subscriber yield endpoints stored in a certain etcd keyspace. Any kind of
-// change in that keyspace is watched and will update the Subscriber endpoints.
-type Subscriber struct {
-	client Client
-	prefix string
-	cache  *cache.Cache
-	logger log.Logger
-	quitc  chan struct{}
-}
-
-var _ sd.Subscriber = &Subscriber{}
-
-// NewSubscriber returns an etcd subscriber. It will start watching the given
-// prefix for changes, and update the endpoints.
-func NewSubscriber(c Client, prefix string, factory sd.Factory, logger log.Logger) (*Subscriber, error) {
-	s := &Subscriber{
-		client: c,
-		prefix: prefix,
-		cache:  cache.New(factory, logger),
-		logger: logger,
-		quitc:  make(chan struct{}),
-	}
-
-	instances, err := s.client.GetEntries(s.prefix)
-	if err == nil {
-		logger.Log("prefix", s.prefix, "instances", len(instances))
-	} else {
-		logger.Log("prefix", s.prefix, "err", err)
-	}
-	s.cache.Update(instances)
-
-	go s.loop()
-	return s, nil
-}
-
-func (s *Subscriber) loop() {
-	ch := make(chan struct{})
-	go s.client.WatchPrefix(s.prefix, ch)
-	for {
-		select {
-		case <-ch:
-			instances, err := s.client.GetEntries(s.prefix)
-			if err != nil {
-				s.logger.Log("msg", "failed to retrieve entries", "err", err)
-				continue
-			}
-			s.cache.Update(instances)
-
-		case <-s.quitc:
-			return
-		}
-	}
-}
-
-// Endpoints implements the Subscriber interface.
-func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
-	return s.cache.Endpoints(), nil
-}
-
-// Stop terminates the Subscriber.
-func (s *Subscriber) Stop() {
-	close(s.quitc)
-}
@@ -1,2 +1,2 @@
-// Package eureka provides subscriber and registrar implementations for Netflix OSS's Eureka
+// Package eureka provides Instancer and Registrar implementations for Netflix OSS's Eureka
 package eureka
@@ -5,37 +5,32 @@ import (
 
 	"github.com/hudl/fargo"
 
-	"github.com/go-kit/kit/endpoint"
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/sd"
-	"github.com/go-kit/kit/sd/cache"
+	"github.com/go-kit/kit/sd/internal/instance"
 )
 
-// Subscriber yields endpoints stored in the Eureka registry for the given app.
-// Changes in that app are watched and will update the Subscriber endpoints.
-type Subscriber struct {
-	conn    fargoConnection
-	app     string
-	factory sd.Factory
-	logger  log.Logger
-	cache   *cache.Cache
-	quitc   chan chan struct{}
+// Instancer yields instances stored in the Eureka registry for the given app.
+// Changes in that app are watched and will update the subscribers.
+type Instancer struct {
+	instance.Cache
+	conn   fargoConnection
+	app    string
+	logger log.Logger
+	quitc  chan chan struct{}
 }
 
-var _ sd.Subscriber = (*Subscriber)(nil)
-
-// NewSubscriber returns a Eureka subscriber. It will start watching the given
-// app string for changes, and update the endpoints accordingly.
-func NewSubscriber(conn fargoConnection, app string, factory sd.Factory, logger log.Logger) *Subscriber {
+// NewInstancer returns a Eureka Instancer. It will start watching the given
+// app string for changes, and update the subscribers accordingly.
+func NewInstancer(conn fargoConnection, app string, logger log.Logger) *Instancer {
 	logger = log.With(logger, "app", app)
 
-	s := &Subscriber{
-		conn:    conn,
-		app:     app,
-		factory: factory,
-		logger:  logger,
-		cache:   cache.New(factory, logger),
-		quitc:   make(chan chan struct{}),
+	s := &Instancer{
+		Cache:  *instance.NewCache(),
+		conn:   conn,
+		app:    app,
+		logger: logger,
+		quitc:  make(chan chan struct{}),
 	}
 
 	instances, err := s.getInstances()
@@ -45,25 +40,20 @@ func NewSubscriber(conn fargoConnection, app string, factory sd.Factory, logger
 		s.logger.Log("during", "getInstances", "err", err)
 	}
 
-	s.cache.Update(instances)
+	s.Update(sd.Event{Instances: instances, Err: err})
 	go s.loop()
 	return s
 }
 
-// Endpoints implements the Subscriber interface.
-func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
-	return s.cache.Endpoints(), nil
-}
-
-// Stop terminates the subscriber.
-func (s *Subscriber) Stop() {
+// Stop terminates the Instancer.
+func (s *Instancer) Stop() {
 	q := make(chan struct{})
 	s.quitc <- q
 	<-q
 	s.quitc = nil
 }
 
-func (s *Subscriber) loop() {
+func (s *Instancer) loop() {
 	var (
 		await   = false
 		done    = make(chan struct{})
@@ -76,11 +66,12 @@ func (s *Subscriber) loop() {
 		case update := <-updatec:
 			if update.Err != nil {
 				s.logger.Log("during", "Update", "err", update.Err)
+				s.Update(sd.Event{Err: update.Err})
 				continue
 			}
 			instances := convertFargoAppToInstances(update.App)
 			s.logger.Log("instances", len(instances))
-			s.cache.Update(instances)
+			s.Update(sd.Event{Instances: instances})
 
 		case q := <-s.quitc:
 			close(q)
@@ -89,7 +80,7 @@ func (s *Subscriber) loop() {
 	}
 }
 
-func (s *Subscriber) getInstances() ([]string, error) {
+func (s *Instancer) getInstances() ([]string, error) {
 	app, err := s.conn.GetApp(s.app)
 	if err != nil {
 		return nil, err
@@ -8,9 +8,12 @@ import (
 	"github.com/hudl/fargo"
 
 	"github.com/go-kit/kit/endpoint"
+	"github.com/go-kit/kit/sd"
 )
 
-func TestSubscriber(t *testing.T) {
+var _ sd.Instancer = &Instancer{} // API check
+
+func TestInstancer(t *testing.T) {
 	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
 		return endpoint.Nop, nil, nil
 	}
@@ -21,10 +24,11 @@ func TestSubscriber(t *testing.T) {
 		errApplication: nil,
 	}
 
-	subscriber := NewSubscriber(connection, appNameTest, factory, loggerTest)
-	defer subscriber.Stop()
+	instancer := NewInstancer(connection, appNameTest, loggerTest)
+	defer instancer.Stop()
+	endpointer := sd.NewEndpointer(instancer, factory, loggerTest)
 
-	endpoints, err := subscriber.Endpoints()
+	endpoints, err := endpointer.Endpoints()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -34,7 +38,7 @@ func TestSubscriber(t *testing.T) {
 	}
 }
 
-func TestSubscriberScheduleUpdates(t *testing.T) {
+func TestInstancerScheduleUpdates(t *testing.T) {
 	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
 		return endpoint.Nop, nil, nil
 	}
@@ -45,17 +49,18 @@ func TestSubscriberScheduleUpdates(t *testing.T) {
 		errApplication: nil,
 	}
 
-	subscriber := NewSubscriber(connection, appNameTest, factory, loggerTest)
-	defer subscriber.Stop()
+	instancer := NewInstancer(connection, appNameTest, loggerTest)
+	defer instancer.Stop()
+	endpointer := sd.NewEndpointer(instancer, factory, loggerTest)
 
-	endpoints, _ := subscriber.Endpoints()
+	endpoints, _ := endpointer.Endpoints()
 	if want, have := 1, len(endpoints); want != have {
 		t.Errorf("want %d, have %d", want, have)
 	}
 
	time.Sleep(50 * time.Millisecond)
 
-	endpoints, _ = subscriber.Endpoints()
+	endpoints, _ = endpointer.Endpoints()
 	if want, have := 2, len(endpoints); want != have {
 		t.Errorf("want %v, have %v", want, have)
 	}
@@ -72,10 +77,11 @@ func TestBadFactory(t *testing.T) {
 		errApplication: nil,
 	}
 
-	subscriber := NewSubscriber(connection, appNameTest, factory, loggerTest)
-	defer subscriber.Stop()
+	instancer := NewInstancer(connection, appNameTest, loggerTest)
+	defer instancer.Stop()
+	endpointer := sd.NewEndpointer(instancer, factory, loggerTest)
 
-	endpoints, err := subscriber.Endpoints()
+	endpoints, err := endpointer.Endpoints()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -85,7 +91,7 @@ func TestBadFactory(t *testing.T) {
 	}
 }
 
-func TestBadSubscriberInstances(t *testing.T) {
+func TestBadInstancerInstances(t *testing.T) {
 	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
 		return endpoint.Nop, nil, nil
 	}
@@ -97,10 +103,11 @@ func TestBadSubscriberInstances(t *testing.T) {
 		errApplication: nil,
 	}
 
-	subscriber := NewSubscriber(connection, appNameTest, factory, loggerTest)
-	defer subscriber.Stop()
+	instancer := NewInstancer(connection, appNameTest, loggerTest)
+	defer instancer.Stop()
+	endpointer := sd.NewEndpointer(instancer, factory, loggerTest)
 
-	endpoints, err := subscriber.Endpoints()
+	endpoints, err := endpointer.Endpoints()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -110,7 +117,7 @@ func TestBadSubscriberInstances(t *testing.T) {
 	}
 }
 
-func TestBadSubscriberScheduleUpdates(t *testing.T) {
+func TestBadInstancerScheduleUpdates(t *testing.T) {
 	factory := func(string) (endpoint.Endpoint, io.Closer, error) {
 		return endpoint.Nop, nil, nil
 	}
@@ -121,10 +128,11 @@ func TestBadSubscriberScheduleUpdates(t *testing.T) {
 		errApplication: errTest,
 	}
 
-	subscriber := NewSubscriber(connection, appNameTest, factory, loggerTest)
-	defer subscriber.Stop()
+	instancer := NewInstancer(connection, appNameTest, loggerTest)
+	defer instancer.Stop()
+	endpointer := sd.NewEndpointer(instancer, factory, loggerTest)
 
-	endpoints, err := subscriber.Endpoints()
+	endpoints, err := endpointer.Endpoints()
 	if err != nil {
 		t.Error(err)
 	}
@@ -134,7 +142,7 @@ func TestBadSubscriberScheduleUpdates(t *testing.T) {
 
 	time.Sleep(50 * time.Millisecond)
 
-	endpoints, err = subscriber.Endpoints()
+	endpoints, err = endpointer.Endpoints()
 	if err != nil {
 		t.Error(err)
 	}
@ -12,6 +12,7 @@ import (

"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/sd"
)

// Package sd/eureka provides a wrapper around the Netflix Eureka service
@ -54,16 +55,16 @@ func TestIntegration(t *testing.T) {
t.Logf("factory invoked for %q", instance)
return endpoint.Nop, nil, nil
}
s := NewSubscriber(
instancer := NewInstancer(
&fargoConnection,
appNameTest,
factory,
log.With(logger, "component", "subscriber"),
log.With(logger, "component", "instancer"),
)
defer s.Stop()
defer instancer.Stop()
endpointer := sd.NewEndpointer(instancer, factory, log.With(logger, "component", "endpointer"))

// We should have one endpoint immediately after subscriber instantiation.
endpoints, err := s.Endpoints()
endpoints, err := endpointer.Endpoints()
if err != nil {
t.Error(err)
}
@ -81,7 +82,7 @@ func TestIntegration(t *testing.T) {
time.Sleep(2 * time.Second)

// Now we should have two endpoints.
endpoints, err = s.Endpoints()
endpoints, err = endpointer.Endpoints()
if err != nil {
t.Error(err)
}
@ -96,7 +97,7 @@ func TestIntegration(t *testing.T) {
time.Sleep(2 * time.Second)

// And then there was one.
endpoints, err = s.Endpoints()
endpoints, err = endpointer.Endpoints()
if err != nil {
t.Error(err)
}
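A minimal sketch of the Eureka wiring this test exercises: Instancer watches the discovery system, Endpointer turns instances into endpoints via the factory. The Eureka address, app name, and no-op factory are assumptions for illustration, not taken from this commit.

package main

import (
	"io"

	"github.com/hudl/fargo"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/sd"
	"github.com/go-kit/kit/sd/eureka"
)

func main() {
	logger := log.NewNopLogger()
	conn := fargo.NewConn("http://localhost:8761/eureka") // assumed address

	// A factory turns a discovered instance string into a usable endpoint;
	// endpoint.Nop stands in for real transport construction.
	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
		return endpoint.Nop, nil, nil
	}

	instancer := eureka.NewInstancer(&conn, "MYSERVICE", logger)
	defer instancer.Stop()

	endpointer := sd.NewEndpointer(instancer, factory, logger)
	endpoints, err := endpointer.Endpoints() // current set, kept fresh by push events
	_, _ = endpoints, err
}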
@ -1,9 +0,0 @@
package sd

import "github.com/go-kit/kit/endpoint"

// FixedSubscriber yields a fixed set of services.
type FixedSubscriber []endpoint.Endpoint

// Endpoints implements Subscriber.
func (s FixedSubscriber) Endpoints() ([]endpoint.Endpoint, error) { return s, nil }

34
sd/instancer.go
Normal file
@ -0,0 +1,34 @@
package sd

// Event represents a push notification generated from the underlying service discovery
// implementation. It contains either a full set of available resource instances, or
// an error indicating some issue with obtaining information from the discovery backend.
// Examples of errors may include losing the connection to the discovery backend, or
// trying to look up resource instances using an incorrectly formatted key.
// After receiving an Event with an error, the listener should treat previously discovered
// resource instances as stale (although it may choose to continue using them).
// If the Instancer is able to restore the connection to the discovery backend it must push
// another Event with the current set of resource instances.
type Event struct {
	Instances []string
	Err       error
}

// Instancer listens to a service discovery system and notifies registered
// observers of changes in the resource instances. Every event sent to the channels
// contains a complete set of instances known to the Instancer. That complete set is
// sent immediately upon registering the channel, and on any future updates from
// the discovery system.
type Instancer interface {
	Register(chan<- Event)
	Deregister(chan<- Event)
}

// FixedInstancer yields a fixed set of instances.
type FixedInstancer []string

// Register implements Instancer.
func (d FixedInstancer) Register(ch chan<- Event) { ch <- Event{Instances: d} }

// Deregister implements Instancer.
func (d FixedInstancer) Deregister(ch chan<- Event) {}
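The push contract is easiest to see with the FixedInstancer defined in this file: Register delivers the current state immediately. A minimal consumer sketch; the instance addresses are made up.

package main

import (
	"fmt"

	"github.com/go-kit/kit/sd"
)

func main() {
	// FixedInstancer pushes its (static) instance set to each registered channel.
	instancer := sd.FixedInstancer{"10.0.0.1:8080", "10.0.0.2:8080"}

	ch := make(chan sd.Event, 1) // buffered so Register's send cannot block
	instancer.Register(ch)
	defer instancer.Deregister(ch)

	event := <-ch
	if event.Err != nil {
		// Per the Event contract, previously seen instances are now stale.
		fmt.Println("discovery error:", event.Err)
		return
	}
	fmt.Println("instances:", event.Instances) // [10.0.0.1:8080 10.0.0.2:8080]
}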
79
sd/internal/instance/cache.go
Normal file
@ -0,0 +1,79 @@
package instance

import (
	"reflect"
	"sort"
	"sync"

	"github.com/go-kit/kit/sd"
)

// Cache keeps track of resource instances provided to it via the Update method
// and implements the Instancer interface.
type Cache struct {
	mtx   sync.RWMutex
	state sd.Event
	reg   registry
}

// NewCache creates a new Cache.
func NewCache() *Cache {
	return &Cache{
		reg: registry{},
	}
}

// Update receives new instances from service discovery, stores them internally,
// and notifies all registered listeners.
func (c *Cache) Update(event sd.Event) {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	sort.Strings(event.Instances)
	if reflect.DeepEqual(c.state, event) {
		return // no need to broadcast the same instances
	}

	c.state = event
	c.reg.broadcast(event)
}

// State returns the current state of discovery (instances or error) as sd.Event.
func (c *Cache) State() sd.Event {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return c.state
}

// Register implements Instancer.
func (c *Cache) Register(ch chan<- sd.Event) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.reg.register(ch)
	// always push the current state to new channels
	ch <- c.state
}

// Deregister implements Instancer.
func (c *Cache) Deregister(ch chan<- sd.Event) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.reg.deregister(ch)
}

// registry is not goroutine-safe.
type registry map[chan<- sd.Event]struct{}

func (r registry) broadcast(event sd.Event) {
	for c := range r {
		c <- event
	}
}

func (r registry) register(c chan<- sd.Event) {
	r[c] = struct{}{}
}

func (r registry) deregister(c chan<- sd.Event) {
	delete(r, c)
}
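The sort-and-compare in Update means listeners never see back-to-back duplicate events. A small sketch of that behavior; since the package is internal, it can only be exercised from within sd/internal/instance, and the function name is ours.

package instance

import (
	"fmt"

	"github.com/go-kit/kit/sd"
)

func dedupDemo() {
	c := NewCache()
	ch := make(chan sd.Event, 3) // buffered: Register and broadcast send synchronously
	c.Register(ch)               // pushes the current (empty) state immediately

	c.Update(sd.Event{Instances: []string{"b", "a"}}) // broadcast: state changed
	c.Update(sd.Event{Instances: []string{"a", "b"}}) // suppressed: sorts equal to current state

	fmt.Println(len(ch)) // 2: the initial state plus one real update
}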
78
sd/internal/instance/cache_test.go
Normal file
@ -0,0 +1,78 @@
package instance

import (
	"sync"
	"testing"

	"github.com/go-kit/kit/sd"
)

var _ sd.Instancer = &Cache{} // API check

func TestCache(t *testing.T) {
	// TODO this test is not finished yet

	c := NewCache()

	{
		state := c.State()
		if want, have := 0, len(state.Instances); want != have {
			t.Fatalf("want %v instances, have %v", want, have)
		}
	}

	notification1 := sd.Event{Instances: []string{"x", "y"}}
	notification2 := sd.Event{Instances: []string{"a", "b", "c"}}

	c.Update(notification1)

	// times 2 because we have two observers
	expectedInstances := 2 * (len(notification1.Instances) + len(notification2.Instances))

	wg := sync.WaitGroup{}
	wg.Add(expectedInstances)

	receiver := func(ch chan sd.Event) {
		for state := range ch {
			// count total number of instances received
			for range state.Instances {
				wg.Done()
			}
		}
	}

	f1 := make(chan sd.Event)
	f2 := make(chan sd.Event)
	go receiver(f1)
	go receiver(f2)

	c.Register(f1)
	c.Register(f2)

	c.Update(notification1)
	c.Update(notification2)

	// if state := c.State(); instances == nil {
	// 	if want, have := len(notification2), len(instances); want != have {
	// 		t.Errorf("want length %v, have %v", want, have)
	// 	} else {
	// 		for i := range notification2 {
	// 			if want, have := notification2[i], instances[i]; want != have {
	// 				t.Errorf("want instance %v, have %v", want, have)
	// 			}
	// 		}
	// 	}
	// }

	close(f1)
	close(f2)

	wg.Wait()

	// d.Deregister(f1)

	// d.Unregister(f2)
	// if want, have := 0, len(d.observers); want != have {
	// 	t.Fatalf("want %v observers, have %v", want, have)
	// }
}
@ -8,7 +8,7 @@ import (
)

// NewRandom returns a load balancer that selects services randomly.
func NewRandom(s sd.Subscriber, seed int64) Balancer {
func NewRandom(s sd.Endpointer, seed int64) Balancer {
return &random{
s: s,
r: rand.New(rand.NewSource(seed)),
@ -16,7 +16,7 @@ func NewRandom(s sd.Subscriber, seed int64) Balancer {
}

type random struct {
s sd.Subscriber
s sd.Endpointer
r *rand.Rand
}
@ -25,8 +25,8 @@ func TestRandom(t *testing.T) {
endpoints[i] = func(context.Context, interface{}) (interface{}, error) { counts[i0]++; return struct{}{}, nil }
}

subscriber := sd.FixedSubscriber(endpoints)
balancer := NewRandom(subscriber, seed)
endpointer := sd.FixedEndpointer(endpoints)
balancer := NewRandom(endpointer, seed)

for i := 0; i < iterations; i++ {
endpoint, _ := balancer.Endpoint()
@ -42,8 +42,8 @@ func TestRandom(t *testing.T) {
}

func TestRandomNoEndpoints(t *testing.T) {
subscriber := sd.FixedSubscriber{}
balancer := NewRandom(subscriber, 1415926)
endpointer := sd.FixedEndpointer{}
balancer := NewRandom(endpointer, 1415926)
_, err := balancer.Endpoint()
if want, have := ErrNoEndpoints, err; want != have {
t.Errorf("want %v, have %v", want, have)
@ -13,7 +13,7 @@ import (

func TestRetryMaxTotalFail(t *testing.T) {
var (
endpoints = sd.FixedSubscriber{} // no endpoints
endpoints = sd.FixedEndpointer{} // no endpoints
rr = lb.NewRoundRobin(endpoints)
retry = lb.Retry(999, time.Second, rr) // lots of retries
ctx = context.Background()
@ -30,13 +30,13 @@ func TestRetryMaxPartialFail(t *testing.T) {
func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") },
func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ },
}
subscriber = sd.FixedSubscriber{
endpointer = sd.FixedEndpointer{
0: endpoints[0],
1: endpoints[1],
2: endpoints[2],
}
retries = len(endpoints) - 1 // not quite enough retries
rr = lb.NewRoundRobin(subscriber)
rr = lb.NewRoundRobin(endpointer)
ctx = context.Background()
)
if _, err := lb.Retry(retries, time.Second, rr)(ctx, struct{}{}); err == nil {
@ -51,13 +51,13 @@ func TestRetryMaxSuccess(t *testing.T) {
func(context.Context, interface{}) (interface{}, error) { return nil, errors.New("error two") },
func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ },
}
subscriber = sd.FixedSubscriber{
endpointer = sd.FixedEndpointer{
0: endpoints[0],
1: endpoints[1],
2: endpoints[2],
}
retries = len(endpoints) // exactly enough retries
rr = lb.NewRoundRobin(subscriber)
rr = lb.NewRoundRobin(endpointer)
ctx = context.Background()
)
if _, err := lb.Retry(retries, time.Second, rr)(ctx, struct{}{}); err != nil {
@ -70,7 +70,7 @@ func TestRetryTimeout(t *testing.T) {
step = make(chan struct{})
e = func(context.Context, interface{}) (interface{}, error) { <-step; return struct{}{}, nil }
timeout = time.Millisecond
retry = lb.Retry(999, timeout, lb.NewRoundRobin(sd.FixedSubscriber{0: e}))
retry = lb.Retry(999, timeout, lb.NewRoundRobin(sd.FixedEndpointer{0: e}))
errs = make(chan error, 1)
invoke = func() { _, err := retry(context.Background(), struct{}{}); errs <- err }
)
@ -92,7 +92,7 @@ func TestAbortEarlyCustomMessage(t *testing.T) {
var (
myErr = errors.New("aborting early")
cb = func(int, error) (bool, error) { return false, myErr }
endpoints = sd.FixedSubscriber{} // no endpoints
endpoints = sd.FixedEndpointer{} // no endpoints
rr = lb.NewRoundRobin(endpoints)
retry = lb.RetryWithCallback(time.Second, rr, cb) // lots of retries
ctx = context.Background()
@ -115,7 +115,7 @@ func TestErrorPassedUnchangedToCallback(t *testing.T) {
endpoint = func(ctx context.Context, request interface{}) (interface{}, error) {
return nil, myErr
}
endpoints = sd.FixedSubscriber{endpoint} // no endpoints
endpoints = sd.FixedEndpointer{endpoint} // no endpoints
rr = lb.NewRoundRobin(endpoints)
retry = lb.RetryWithCallback(time.Second, rr, cb) // lots of retries
ctx = context.Background()
@ -128,10 +128,10 @@ func TestErrorPassedUnchangedToCallback(t *testing.T) {

func TestHandleNilCallback(t *testing.T) {
var (
subscriber = sd.FixedSubscriber{
endpointer = sd.FixedEndpointer{
func(context.Context, interface{}) (interface{}, error) { return struct{}{}, nil /* OK */ },
}
rr = lb.NewRoundRobin(subscriber)
rr = lb.NewRoundRobin(endpointer)
ctx = context.Background()
)
retry := lb.RetryWithCallback(time.Second, rr, nil)
@ -8,7 +8,7 @@ import (
)

// NewRoundRobin returns a load balancer that returns services in sequence.
func NewRoundRobin(s sd.Subscriber) Balancer {
func NewRoundRobin(s sd.Endpointer) Balancer {
return &roundRobin{
s: s,
c: 0,
@ -16,7 +16,7 @@ func NewRoundRobin(s sd.Subscriber) Balancer {
}

type roundRobin struct {
s sd.Subscriber
s sd.Endpointer
c uint64
}
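The balancers are otherwise unchanged: they still draw the current endpoint set on every call, now from an Endpointer instead of a Subscriber. A minimal sketch using the FixedEndpointer, NewRoundRobin, and Retry combinators seen throughout these tests:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-kit/kit/sd"
	"github.com/go-kit/kit/sd/lb"
)

func main() {
	// A fixed endpoint set stands in for a live, discovery-backed Endpointer.
	endpointer := sd.FixedEndpointer{
		func(context.Context, interface{}) (interface{}, error) { return "ok", nil },
	}

	balancer := lb.NewRoundRobin(endpointer)
	retry := lb.Retry(3, time.Second, balancer) // retry is itself a plain endpoint

	response, err := retry(context.Background(), struct{}{})
	fmt.Println(response, err) // ok <nil>
}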
@ -22,8 +22,8 @@ func TestRoundRobin(t *testing.T) {
}
)

subscriber := sd.FixedSubscriber(endpoints)
balancer := NewRoundRobin(subscriber)
endpointer := sd.FixedEndpointer(endpoints)
balancer := NewRoundRobin(endpointer)

for i, want := range [][]int{
{1, 0, 0},
@ -46,8 +46,8 @@ func TestRoundRobin(t *testing.T) {
}

func TestRoundRobinNoEndpoints(t *testing.T) {
subscriber := sd.FixedSubscriber{}
balancer := NewRoundRobin(subscriber)
endpointer := sd.FixedEndpointer{}
balancer := NewRoundRobin(endpointer)
_, err := balancer.Endpoint()
if want, have := ErrNoEndpoints, err; want != have {
t.Errorf("want %v, have %v", want, have)
@ -55,7 +55,7 @@ func TestRoundRobinNoEndpoints(t *testing.T) {
}

func TestRoundRobinNoRace(t *testing.T) {
balancer := NewRoundRobin(sd.FixedSubscriber([]endpoint.Endpoint{
balancer := NewRoundRobin(sd.FixedEndpointer([]endpoint.Endpoint{
endpoint.Nop,
endpoint.Nop,
endpoint.Nop,
@ -1,11 +0,0 @@
package sd

import "github.com/go-kit/kit/endpoint"

// Subscriber listens to a service discovery system and yields a set of
// identical endpoints on demand. An error indicates a problem with connectivity
// to the service discovery system, or within the system itself; a subscriber
// may yield no endpoints without error.
type Subscriber interface {
	Endpoints() ([]endpoint.Endpoint, error)
}
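The Subscriber interface is removed; its pull-style role passes to sd.Endpointer, which this commit shows only at call sites (sd.NewEndpointer, the lb constructors, asyncTest). Inferred from those call sites, not the verbatim definition, the replacement has the same shape:

// Inferred sketch, not the verbatim go-kit definition: every call site in
// this diff requires only an Endpoints method with the old Subscriber signature.
type Endpointer interface {
	Endpoints() ([]endpoint.Endpoint, error)
}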
@ -107,15 +107,15 @@ func TestCreateParentNodes(t *testing.T) {
t.Fatal("expected new Client, got nil")
}

s, err := NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
s, err := NewInstancer(c, "/validpath", log.NewNopLogger())
if err != stdzk.ErrNoServer {
t.Errorf("unexpected error: %v", err)
}
if s != nil {
t.Error("expected failed new Subscriber")
t.Error("expected failed new Instancer")
}

s, err = NewSubscriber(c, "invalidpath", newFactory(""), log.NewNopLogger())
s, err = NewInstancer(c, "invalidpath", log.NewNopLogger())
if err != stdzk.ErrInvalidPath {
t.Errorf("unexpected error: %v", err)
}
@ -131,12 +131,12 @@ func TestCreateParentNodes(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}

s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
s, err = NewInstancer(c, "/validpath", log.NewNopLogger())
if err != ErrClientClosed {
t.Errorf("unexpected error: %v", err)
}
if s != nil {
t.Error("expected failed new Subscriber")
t.Error("expected failed new Instancer")
}

c, err = NewClient([]string{"localhost:65500"}, log.NewNopLogger(), Payload(payload))
@ -147,11 +147,11 @@ func TestCreateParentNodes(t *testing.T) {
t.Fatal("expected new Client, got nil")
}

s, err = NewSubscriber(c, "/validpath", newFactory(""), log.NewNopLogger())
s, err = NewInstancer(c, "/validpath", log.NewNopLogger())
if err != stdzk.ErrNoServer {
t.Errorf("unexpected error: %v", err)
}
if s != nil {
t.Error("expected failed new Subscriber")
t.Error("expected failed new Instancer")
}
}
@ -1,2 +1,2 @@
// Package zk provides subscriber and registrar implementations for ZooKeeper.
// Package zk provides Instancer and Registrar implementations for ZooKeeper.
package zk
@ -3,31 +3,28 @@ package zk
import (
"github.com/samuel/go-zookeeper/zk"

"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/sd"
"github.com/go-kit/kit/sd/cache"
"github.com/go-kit/kit/sd/internal/instance"
)

// Subscriber yield endpoints stored in a certain ZooKeeper path. Any kind of
// change in that path is watched and will update the Subscriber endpoints.
type Subscriber struct {
// Instancer yields instances stored in a certain ZooKeeper path. Any kind of
// change in that path is watched and will update the subscribers.
type Instancer struct {
instance.Cache
client Client
path string
cache *cache.Cache
logger log.Logger
quitc chan struct{}
}

var _ sd.Subscriber = &Subscriber{}

// NewSubscriber returns a ZooKeeper subscriber. ZooKeeper will start watching
// the given path for changes and update the Subscriber endpoints.
func NewSubscriber(c Client, path string, factory sd.Factory, logger log.Logger) (*Subscriber, error) {
s := &Subscriber{
// NewInstancer returns a ZooKeeper Instancer. ZooKeeper will start watching
// the given path for changes and update the Instancer endpoints.
func NewInstancer(c Client, path string, logger log.Logger) (*Instancer, error) {
s := &Instancer{
Cache: *instance.NewCache(),
client: c,
path: path,
cache: cache.New(factory, logger),
logger: logger,
quitc: make(chan struct{}),
}
@ -40,17 +37,18 @@ func NewSubscriber(c Client, path string, factory sd.Factory, logger log.Logger)
instances, eventc, err := s.client.GetEntries(s.path)
if err != nil {
logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err)
// TODO: why does the zk constructor exit here, when other implementations continue?
return nil, err
}
logger.Log("path", s.path, "instances", len(instances))
s.cache.Update(instances)
s.Update(sd.Event{Instances: instances})

go s.loop(eventc)

return s, nil
}

func (s *Subscriber) loop(eventc <-chan zk.Event) {
func (s *Instancer) loop(eventc <-chan zk.Event) {
var (
instances []string
err error
@ -64,10 +62,11 @@ func (s *Subscriber) loop(eventc <-chan zk.Event) {
instances, eventc, err = s.client.GetEntries(s.path)
if err != nil {
s.logger.Log("path", s.path, "msg", "failed to retrieve entries", "err", err)
s.Update(sd.Event{Err: err})
continue
}
s.logger.Log("path", s.path, "instances", len(instances))
s.cache.Update(instances)
s.Update(sd.Event{Instances: instances})

case <-s.quitc:
return
@ -75,12 +74,7 @@ func (s *Subscriber) loop(eventc <-chan zk.Event) {
}
}

// Endpoints implements the Subscriber interface.
func (s *Subscriber) Endpoints() ([]endpoint.Endpoint, error) {
return s.cache.Endpoints(), nil
}

// Stop terminates the Subscriber.
func (s *Subscriber) Stop() {
// Stop terminates the Instancer.
func (s *Instancer) Stop() {
close(s.quitc)
}
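A sketch of the resulting ZooKeeper wiring, mirroring the tests that follow; the ensemble address, path, and no-op factory are assumptions for illustration.

package main

import (
	"io"

	"github.com/go-kit/kit/endpoint"
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/sd"
	"github.com/go-kit/kit/sd/zk"
)

func main() {
	logger := log.NewNopLogger()

	// Assumed: a reachable ZooKeeper ensemble.
	client, err := zk.NewClient([]string{"localhost:2181"}, logger)
	if err != nil {
		panic(err)
	}

	factory := func(instance string) (endpoint.Endpoint, io.Closer, error) {
		return endpoint.Nop, nil, nil // real code would dial the instance
	}

	instancer, err := zk.NewInstancer(client, "/services/addsvc", logger)
	if err != nil {
		panic(err)
	}
	defer instancer.Stop()

	// The Endpointer subscribes to the Instancer's push events and keeps
	// a factory-built endpoint per live instance.
	endpointer := sd.NewEndpointer(instancer, factory, logger)
	endpoints, _ := endpointer.Endpoints()
	_ = endpoints
}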
@ -3,18 +3,23 @@ package zk
import (
"testing"
"time"

"github.com/go-kit/kit/sd"
)

func TestSubscriber(t *testing.T) {
var _ sd.Instancer = &Instancer{}

func TestInstancer(t *testing.T) {
client := newFakeClient()

s, err := NewSubscriber(client, path, newFactory(""), logger)
instancer, err := NewInstancer(client, path, logger)
if err != nil {
t.Fatalf("failed to create new Subscriber: %v", err)
t.Fatalf("failed to create new Instancer: %v", err)
}
defer s.Stop()
defer instancer.Stop()
endpointer := sd.NewEndpointer(instancer, newFactory(""), logger)

if _, err := s.Endpoints(); err != nil {
if _, err := endpointer.Endpoints(); err != nil {
t.Fatal(err)
}
}
@ -22,11 +27,12 @@ func TestSubscriber(t *testing.T) {
func TestBadFactory(t *testing.T) {
client := newFakeClient()

s, err := NewSubscriber(client, path, newFactory("kaboom"), logger)
instancer, err := NewInstancer(client, path, logger)
if err != nil {
t.Fatalf("failed to create new Subscriber: %v", err)
t.Fatalf("failed to create new Instancer: %v", err)
}
defer s.Stop()
defer instancer.Stop()
endpointer := sd.NewEndpointer(instancer, newFactory("kaboom"), logger)

// instance1 came online
client.AddService(path+"/instance1", "kaboom")
@ -34,7 +40,7 @@ func TestBadFactory(t *testing.T) {
// instance2 came online
client.AddService(path+"/instance2", "zookeeper_node_data")

if err = asyncTest(100*time.Millisecond, 1, s); err != nil {
if err = asyncTest(100*time.Millisecond, 1, endpointer); err != nil {
t.Error(err)
}
}
@ -42,13 +48,14 @@ func TestBadFactory(t *testing.T) {
func TestServiceUpdate(t *testing.T) {
client := newFakeClient()

s, err := NewSubscriber(client, path, newFactory(""), logger)
instancer, err := NewInstancer(client, path, logger)
if err != nil {
t.Fatalf("failed to create new Subscriber: %v", err)
t.Fatalf("failed to create new Instancer: %v", err)
}
defer s.Stop()
defer instancer.Stop()
endpointer := sd.NewEndpointer(instancer, newFactory(""), logger)

endpoints, err := s.Endpoints()
endpoints, err := endpointer.Endpoints()
if err != nil {
t.Fatal(err)
}
@ -63,7 +70,7 @@ func TestServiceUpdate(t *testing.T) {
client.AddService(path+"/instance2", "zookeeper_node_data2")

// we should have 2 instances
if err = asyncTest(100*time.Millisecond, 2, s); err != nil {
if err = asyncTest(100*time.Millisecond, 2, endpointer); err != nil {
t.Error(err)
}

@ -81,7 +88,7 @@ func TestServiceUpdate(t *testing.T) {
client.AddService(path+"/instance3", "zookeeper_node_data3")

// we should have 3 instances
if err = asyncTest(100*time.Millisecond, 3, s); err != nil {
if err = asyncTest(100*time.Millisecond, 3, endpointer); err != nil {
t.Error(err)
}

@ -92,26 +99,27 @@ func TestServiceUpdate(t *testing.T) {
client.RemoveService(path + "/instance2")

// we should have 1 instance
if err = asyncTest(100*time.Millisecond, 1, s); err != nil {
if err = asyncTest(100*time.Millisecond, 1, endpointer); err != nil {
t.Error(err)
}
}

func TestBadSubscriberCreate(t *testing.T) {
func TestBadInstancerCreate(t *testing.T) {
client := newFakeClient()
client.SendErrorOnWatch()
s, err := NewSubscriber(client, path, newFactory(""), logger)

instancer, err := NewInstancer(client, path, logger)
if err == nil {
t.Error("expected error on new Subscriber")
t.Error("expected error on new Instancer")
}
if s != nil {
t.Error("expected Subscriber not to be created")
if instancer != nil {
t.Error("expected Instancer not to be created")
}
s, err = NewSubscriber(client, "BadPath", newFactory(""), logger)
instancer, err = NewInstancer(client, "BadPath", logger)
if err == nil {
t.Error("expected error on new Subscriber")
t.Error("expected error on new Instancer")
}
if s != nil {
t.Error("expected Subscriber not to be created")
if instancer != nil {
t.Error("expected Instancer not to be created")
}
}
@ -114,7 +114,7 @@ func newFactory(fakeError string) sd.Factory {
}
}

func asyncTest(timeout time.Duration, want int, s *Subscriber) (err error) {
func asyncTest(timeout time.Duration, want int, s sd.Endpointer) (err error) {
var endpoints []endpoint.Endpoint
have := -1 // want can never be <0
t := time.After(timeout)