feat(logger): add logger option to all micro components (override DefaultLogger) closes #2556 (#2559)
* feat(logger): add logger option to all components
* fix: refactor api/rpc.go
* fix: refactor api/stream.go
* fix: api/options.go comment
* fix(logger): do not use logger.Helper internally
* fix(logger): fix comments
* fix(logger): use level.Enabled method
* fix: rename mlogger to log
* fix: run go fmt
* fix: log level
* fix: factories

Co-authored-by: Mohamed MHAMDI <mmhamdi@hubside.com>
Co-authored-by: Davincible <david.brouwer.99@gmail.com>
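The commit threads a logger through each component's options instead of logging via the package-level default. A minimal usage sketch, not taken from the commit itself: the option name runtime.WithLogger and the import paths are assumptions, inferred from the runtime.WithType convention visible in the diff below.

package main

import (
	"go-micro.dev/v4/logger"
	"go-micro.dev/v4/runtime"
	"go-micro.dev/v4/runtime/kubernetes" // assumed import path
)

func main() {
	// Build a custom logger rather than relying on logger.DefaultLogger.
	l := logger.NewLogger(logger.WithLevel(logger.DebugLevel))

	// Hand it to the component; it lands in options.Logger, which the
	// runtime below now uses for all of its internal logging.
	r := kubernetes.NewRuntime(runtime.WithLogger(l)) // WithLogger is assumed
	_ = r
}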
@@ -17,7 +17,7 @@ type action int
 type kubernetes struct {
 	sync.RWMutex
 	// options configure runtime
-	options runtime.Options
+	options *runtime.Options
 	// indicates if we're running
 	running bool
 	// used to stop the runtime
@@ -235,7 +235,7 @@ func (k *kubernetes) getService(labels map[string]string, opts ...client.GetOpti
 func (k *kubernetes) run(events <-chan runtime.Event) {
 	t := time.NewTicker(time.Second * 10)
 	defer t.Stop()
-
+	logger := k.options.Logger
 	for {
 		select {
 		case <-t.C:
@@ -243,9 +243,7 @@ func (k *kubernetes) run(events <-chan runtime.Event) {
 			// - do we even need the ticker for k8s services?
 		case event := <-events:
 			// NOTE: we only handle Update events for now
-			if log.V(log.DebugLevel, log.DefaultLogger) {
-				log.Debugf("Runtime received notification event: %v", event)
-			}
+			logger.Logf(log.DebugLevel, "Runtime received notification event: %v", event)
 			switch event.Type {
 			case runtime.Update:
 				// only process if there's an actual service
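This hunk sets the pattern repeated through the rest of the diff: the caller-side log.V level guard is deleted and the injected logger is called directly, which is safe when the logger filters by level internally (the "use level.Enabled method" item in the commit message). A self-contained sketch of that gating, with simplified stand-in types rather than go-micro's real API:

package main

import "fmt"

// Level is a simplified stand-in for go-micro's logger level type.
type Level int8

const (
	DebugLevel Level = iota - 1
	InfoLevel
)

// Enabled reports whether a message at lvl passes the configured level l.
func (l Level) Enabled(lvl Level) bool { return lvl >= l }

type sketchLogger struct{ level Level }

// Logf drops the message itself when the level is disabled, so callers
// no longer need an if log.V(...) guard around every call site.
func (s *sketchLogger) Logf(lvl Level, format string, args ...interface{}) {
	if !s.level.Enabled(lvl) {
		return
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	l := &sketchLogger{level: InfoLevel}
	l.Logf(DebugLevel, "dropped: %s", "filtered inside Logf")
	l.Logf(InfoLevel, "emitted: %s", "level enabled")
}

The remaining cost of an unguarded call is evaluating its arguments before Logf discards the message, which for these debug strings appears to be acceptable.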
@@ -277,9 +275,7 @@ func (k *kubernetes) run(events <-chan runtime.Event) {
 			}, client.GetLabels(labels))
 
 			if err != nil {
-				if log.V(log.DebugLevel, log.DefaultLogger) {
-					log.Debugf("Runtime update failed to get service %s: %v", event.Service, err)
-				}
+				logger.Logf(log.DebugLevel, "Runtime update failed to get service %s: %v", event.Service, err)
 				continue
 			}
 
@@ -298,21 +294,15 @@ func (k *kubernetes) run(events <-chan runtime.Event) {
 					// update the build time
 					service.Spec.Template.Metadata.Annotations["updated"] = fmt.Sprintf("%d", event.Timestamp.Unix())
 
-					if log.V(log.DebugLevel, log.DefaultLogger) {
-						log.Debugf("Runtime updating service: %s deployment: %s", event.Service, service.Metadata.Name)
-					}
+					logger.Logf(log.DebugLevel, "Runtime updating service: %s deployment: %s", event.Service, service.Metadata.Name)
 					if err := k.client.Update(deploymentResource(&service)); err != nil {
-						if log.V(log.DebugLevel, log.DefaultLogger) {
-							log.Debugf("Runtime failed to update service %s: %v", event.Service, err)
-						}
+						logger.Logf(log.DebugLevel, "Runtime failed to update service %s: %v", event.Service, err)
 						continue
 					}
 				}
 			}
 		case <-k.closed:
-			if log.V(log.DebugLevel, log.DefaultLogger) {
-				log.Debugf("Runtime stopped")
-			}
+			logger.Logf(log.DebugLevel, "Runtime stopped")
 			return
 		}
 	}
@@ -324,7 +314,7 @@ func (k *kubernetes) Init(opts ...runtime.Option) error {
 	defer k.Unlock()
 
 	for _, o := range opts {
-		o(&k.options)
+		o(k.options)
 	}
 
 	return nil
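Init can now pass the options pointer straight through, o(k.options) rather than o(&k.options), because the first hunk changed the struct field from runtime.Options to *runtime.Options. A toy illustration of functional options applied through a pointer field, with stand-in types:

package main

import "fmt"

// Stand-ins for the runtime option types; not go-micro's real API.
type Options struct{ Type string }

type Option func(*Options)

func WithType(t string) Option {
	return func(o *Options) { o.Type = t }
}

type kubernetes struct {
	options *Options // pointer field, as in the updated struct
}

// Init applies each option to the shared pointer; every reader of
// k.options observes the mutation without copying the struct back.
func (k *kubernetes) Init(opts ...Option) {
	for _, o := range opts {
		o(k.options)
	}
}

func main() {
	k := &kubernetes{options: &Options{Type: "service"}}
	k.Init(WithType("job"))
	fmt.Println(k.options.Type) // job
}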
@@ -341,7 +331,7 @@ func (k *kubernetes) Logs(s *runtime.Service, options ...runtime.LogsOption) (ru
 	go func() {
 		records, err := klo.Read()
 		if err != nil {
-			log.Errorf("Failed to get logs for service '%v' from k8s: %v", err)
+			k.options.Logger.Logf(log.ErrorLevel, "Failed to get logs for service '%v' from k8s: %v", err)
 			return
 		}
 		// @todo: this might actually not run before podLogStream starts
@@ -425,7 +415,7 @@ func (k *kubernetes) Create(s *runtime.Service, opts ...runtime.CreateOption) er
 	options.Image = k.getImage(s, options)
 
 	// create new service
-	service := newService(s, options)
+	service := newService(s, options, k.options.Logger)
 
 	// start the service
 	return service.Start(k.client, client.CreateNamespace(options.Namespace))
@@ -542,7 +532,7 @@ func (k *kubernetes) Delete(s *runtime.Service, opts ...runtime.DeleteOption) er
 	service := newService(s, runtime.CreateOptions{
 		Type:      k.options.Type,
 		Namespace: options.Namespace,
-	})
+	}, k.options.Logger)
 
 	return service.Stop(k.client, client.DeleteNamespace(options.Namespace))
 }
@@ -567,9 +557,7 @@ func (k *kubernetes) Start() error {
 		events, err = k.options.Scheduler.Notify()
 		if err != nil {
 			// TODO: should we bail here?
-			if log.V(log.DebugLevel, log.DefaultLogger) {
-				log.Debugf("Runtime failed to start update notifier")
-			}
+			k.options.Logger.Logf(log.DebugLevel, "Runtime failed to start update notifier")
 		}
 	}
 
@@ -611,15 +599,9 @@ func (k *kubernetes) String() string {
 // NewRuntime creates new kubernetes runtime
 func NewRuntime(opts ...runtime.Option) runtime.Runtime {
 	// get default options
-	options := runtime.Options{
-		// Create labels with type "micro": "service"
-		Type: "service",
-	}
-
-	// apply requested options
-	for _, o := range opts {
-		o(&options)
-	}
+	// Create labels with type "micro": "service"
+	mtops := append([]runtime.Option{runtime.WithType("service")}, opts...)
+	options := runtime.NewOptions(mtops...)
 
 	// kubernetes client
 	client := client.NewClusterClient()
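NewRuntime now prepends runtime.WithType("service") to the caller's options and hands the combined slice to runtime.NewOptions, so component defaults are applied first and anything the caller passes wins. A hypothetical sketch of that defaulting order, assuming NewOptions seeds the logger with logger.DefaultLogger; the real implementation may differ:

package main

import "fmt"

// Hypothetical stand-ins; not go-micro's real types.
type Logger interface{ Logf(format string, args ...interface{}) }

type stdLogger struct{}

func (stdLogger) Logf(format string, args ...interface{}) { fmt.Printf(format+"\n", args...) }

// DefaultLogger plays the role of logger.DefaultLogger.
var DefaultLogger Logger = stdLogger{}

type Options struct {
	Type   string
	Logger Logger
}

type Option func(*Options)

func WithType(t string) Option { return func(o *Options) { o.Type = t } }

// NewOptions fills in defaults before applying the caller's options,
// so a prepended WithType("service") is just an overridable default.
func NewOptions(opts ...Option) *Options {
	options := &Options{Logger: DefaultLogger}
	for _, o := range opts {
		o(options)
	}
	return options
}

func main() {
	// Mirrors NewRuntime: component default first, caller options after.
	mtops := append([]Option{WithType("service")}, WithType("function"))
	options := NewOptions(mtops...)
	fmt.Println(options.Type) // function: the later, caller-supplied option wins
}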