1
0
mirror of https://github.com/Chipazawra/v8-1c-cluster-pde.git synced 2024-12-25 01:32:38 +02:00

add docker-compose demo

This commit is contained in:
Anton 2021-12-28 23:06:33 +03:00
parent dde70b0087
commit 4636298466
11 changed files with 1383 additions and 20 deletions

6
.env Normal file
View File

@ -0,0 +1,6 @@
ADMIN_USER=admin
ADMIN_PASSWORD=admin
RAS_HOST=192.168.10.10
RAS_PORT=2545
CLS_USER=admin
CLS_PASS=admin

19
caddy/Caddyfile Normal file
View File

@ -0,0 +1,19 @@
:9090 {
basicauth / {$ADMIN_USER} {$ADMIN_PASSWORD}
proxy / prometheus:9090 {
transparent
}
errors stderr
tls off
}
:3000 {
proxy / grafana:3000 {
transparent
websocket
}
errors stderr
tls off
}

Binary file not shown.

View File

@ -1,12 +1,81 @@
version: '3.9'
networks:
monitoring:
driver: bridge
volumes:
prometheus_data: {}
grafana_data: {}
services:
app:
build: .
container_name: app
image: app
prometheus:
image: prom/prometheus:latest
container_name: prometheus
volumes:
- ./prometheus:/etc/prometheus
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
restart: unless-stopped
expose:
- 9090
networks:
- monitoring
labels:
org.label-schema.group: "monitoring"
grafana:
image: grafana/grafana:latest
container_name: grafana
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning
environment:
HOST_1C: '192.168.10.233'
PORT_1C: '1545'
- GF_SECURITY_ADMIN_USER=${ADMIN_USER}
- GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD}
- GF_USERS_ALLOW_SIGN_UP=false
restart: unless-stopped
expose:
- 3000
networks:
- monitoring
labels:
org.label-schema.group: "monitoring"
pde:
build: .
container_name: pde
environment:
- RAS_HOST=${RAS_HOST}
- RAS_PORT=${RAS_PORT}
restart: unless-stopped
expose:
- 9096
networks:
- monitoring
labels:
org.label-schema.group: "monitoring"
caddy:
image: stefanprodan/caddy
container_name: caddy
ports:
- 9096:9096
- "3000:3000"
- "9090:9090"
- "9096:9096"
volumes:
- ./caddy:/etc/caddy
environment:
- ADMIN_USER=${ADMIN_USER}
- ADMIN_PASSWORD=${ADMIN_PASSWORD}
restart: unless-stopped
networks:
- monitoring
labels:
org.label-schema.group: "monitoring"

View File

@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: 'dashboards'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
allowUiUpdates: true
options:
path: /etc/grafana/provisioning/dashboards

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,11 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
orgId: 1
url: http://prometheus:9090
basicAuth: false
isDefault: true
editable: true

View File

@ -1,11 +1,11 @@
package app
import (
"context"
"flag"
"fmt"
"log"
"net/http"
"os"
"github.com/Chipazawra/v8-1c-cluster-pde/internal/rpHostsCollector"
"github.com/caarlos0/env"
@ -19,7 +19,6 @@ var (
hostFlag string
portFlag string
exposeFlag string
ctx context.Context
)
func init() {
@ -27,6 +26,8 @@ func init() {
log.Fatalf("app: config...")
}
log.SetOutput(os.Stdout)
flag.StringVar(&hostFlag, "host", "", "cluster host.")
flag.StringVar(&portFlag, "port", "", "cluster port.")
flag.StringVar(&exposeFlag, "expose", "", "metrics port.")
@ -43,15 +44,13 @@ func init() {
if exposeFlag != "" {
conf.Expose = exposeFlag
}
ctx = context.Background()
}
func Run() error {
rcli := rascli.NewClient(fmt.Sprintf("%s:%s", conf.Host, conf.Port))
rcli.AuthenticateAgent(conf.User, conf.Pass)
log.Printf("cluster-pde connected to RAS: %v", fmt.Sprintf("%s:%s", conf.Host, conf.Port))
defer rcli.Close()
promRegistry := prometheus.NewRegistry()
@ -61,6 +60,8 @@ func Run() error {
promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}),
)
log.Printf("cluster-pde is running on: %v", fmt.Sprintf("%s:%s", "", conf.Expose))
err := http.ListenAndServe(fmt.Sprintf("%s:%s", "", conf.Expose), nil)
if err != nil {
return Errorf(err)

View File

@ -1,9 +1,9 @@
package app
type Config struct {
Host string `env:"HOST_1C" envDefault:"localhost"`
Port string `env:"PORT_1C" envDefault:"1545"`
User string `env:"USER_1C"`
Pass string `env:"PASS_1C"`
Host string `env:"RAS_HOST" envDefault:"localhost"`
Port string `env:"RAS_PORT" envDefault:"1545"`
User string `env:"CLS_USER"`
Pass string `env:"CLS_PASS"`
Expose string `env:"EXPOSE" envDefault:"9096"`
}

View File

@ -30,11 +30,13 @@ type rpHostsCollector struct {
AvgDbCallTime *prometheus.Desc
AvgLockCallTime *prometheus.Desc
AvgServerCallTime *prometheus.Desc
Running *prometheus.Desc
Enable *prometheus.Desc
}
func New(rasapi rascli.Api) prometheus.Collector {
proccesLabels := []string{"cluster", "pid", "port", "enable", "running", "startedAt"}
proccesLabels := []string{"cluster", "pid", "port", "startedAt"}
return &rpHostsCollector{
ctx: context.Background(),
@ -95,6 +97,14 @@ func New(rasapi rascli.Api) prometheus.Collector {
"rp_hosts_avg_server_call_time",
"host avg server call time",
proccesLabels, nil),
Enable: prometheus.NewDesc(
"rp_hosts_enable",
"host enable",
proccesLabels, nil),
Running: prometheus.NewDesc(
"rp_hosts_running",
"host enable",
proccesLabels, nil),
}
}
@ -141,8 +151,6 @@ func (c *rpHostsCollector) funInCollect(ch chan<- prometheus.Metric, clusterInfo
clusterInfo.Name,
proccesInfo.Pid,
fmt.Sprint(proccesInfo.Port),
fmt.Sprint(proccesInfo.Enable),
fmt.Sprint(proccesInfo.Running),
proccesInfo.StartedAt.Format("2006-01-02 15:04:05"),
}
)
@ -230,7 +238,30 @@ func (c *rpHostsCollector) funInCollect(ch chan<- prometheus.Metric, clusterInfo
float64(proccesInfo.AvgServerCallTime),
proccesLabelsVal...,
)
ch <- prometheus.MustNewConstMetric(
c.Enable,
prometheus.GaugeValue,
func(fl bool) float64 {
if fl {
return 1.0
} else {
return 0.0
}
}(proccesInfo.Enable),
proccesLabelsVal...,
)
ch <- prometheus.MustNewConstMetric(
c.Running,
prometheus.GaugeValue,
func(fl bool) float64 {
if fl {
return 1.0
} else {
return 0.0
}
}(proccesInfo.Running),
proccesLabelsVal...,
)
rpHostsCount++
})

21
prometheus/prometheus.yml Normal file
View File

@ -0,0 +1,21 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
monitor: 'docker-host-alpha'
# A scrape configuration containing exactly one endpoint to scrape.
scrape_configs:
- job_name: 'pde'
scrape_interval: 5s
static_configs:
- targets: ['pde:9096']
- job_name: 'prometheus'
scrape_interval: 10s
static_configs:
- targets: ['localhost:9090']