You've already forked opentelemetry-go
mirror of
https://github.com/open-telemetry/opentelemetry-go.git
synced 2025-11-27 22:49:15 +02:00
Merge metric SDK development branch "new_sdk/main" into "main" (#3175)
* Remove Old SDK and dependent code on that SDK (#2802) * Remove prometheus example code * Remove prometheus exporter code * Remove stdoutmetric code * Remove sdk/metric/* packages * Remove opencensus example code * Remove otlpmetric exporter code * Remove OpenCensus bridge code * go mod tidy * Remove empty modules * Remove the number and aggregator from the metric SDK (#2840) * Add MeterProvider/meter structure to new SDK (#2822) * Remove prometheus example code * Remove prometheus exporter code * Remove stdoutmetric code * Remove sdk/metric/* packages * Remove opencensus example code * Remove otlpmetric exporter code * Remove OpenCensus bridge code * go mod tidy * Remove empty modules * Add MeterProvider/meter structure to new SDK * Add vanity imports * go mod tidy * Add MeterProvider Flush/Shutdown required by spec * Cast nil ptr instead of alloc for comp time check * Apply suggestions from code review Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Apply suggested Shutdown comment * Apply fixes from feedback Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add sdk/metric/view structure (#2838) * Add sdk/metric/view package structure * Vanity import * Define the reader interface, and create a manual reader (#2885) * Add the manual reader to the sdk. * Incorporate feedback from PR. * additional PR comments * Fix lint * Fixes for PR. * Unexport ManualReader fix a few comments * Refactor reader testing into a harness (#2910) * Refactor reader testing into a harness * Run lint * Removed merge leftover * Use opentracing bridge from main * go mod tidy * crosslink * Remove Prometheus exporter from README for now * Run make with new tool set * Replace testReaderHarness with testify suite (#2915) * Add the periodic reader (#2909) * Add the metric.Exporter interface * Move the reader errors to reader.go * Update Reader.Collect docs Remove TODO being addressed in this PR and restate purpose of method. 
* Initial draft of the periodic reader * Refer to correct config in periodic reader opts * Refactor reader testing into a harness * Move wait group handling out of run * Refactor ticker creation to allow testing * Honor export timeout in run * Fix wait group wait bug * Add periodic reader tests * Fix lint * Update periodic reader comments * Add concurrency test for readers * Simplify the ticker stop deferral * Only register once * Restrict build of metric sdk to Go>1.16 * Clean up ShutdownBeforeRegister test * Test duplicate Reader registration (#2914) The specification requires the SDK prevent duplicate registrations for readers. This adds a test for that and fixes this for the manualReader. * Add WithReader and WithResource Options (#2905) * Add WithReader and WithResource Options * Run lint * Update WithReader fn signature based on feedback * crosslink * Remove zero-len check in unify * Restrict build to Go > 1.16 * Add bench test for reader collect methods (#2922) * Unify reader implementations (#2923) * Unify reader implementations Use an atomic.Value to manage concurrency without a lock. * Lint * Merge main into new sdk main (#2925) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. 
* Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Make lint Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Add view to metrics. (#2926) * WIP views public * Add attribute filters and comments. * Fixes for lint * Address comments * Fix lint * Changed view matching to expand end Removed the dscriptor, it was moved in previous patch * change wildcards into regex * Update comments * address comments. * Address more PR comments * renamed WithDescription to WithSetDescription. 
Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Implement MeterProvider's Meter method (#2945) * Implement stubbed meter create method * Rename return value to avoid comment * Encapsulate meterRegistry tests with identifying name * Run lint fix * Comment meterRegistry being concurrent safe * Remove ordered meter tracking in the meterRegistry * Test range completeness instead of order * Remove provider field from meter * Initialize MeterProvider readers field for new (#2948) * Introduce Temporality, WithTemporality Reader options and InstrumentKind (#2949) * Introduce Temporality and InstrumentKind Because Temporality is the responsibility of the Reader additional methods are added to the Reader interface. And New options are created to configure the temporality selector. * Addresses comments, and adds tests. * Fix addition PR comment * Add aggregation package and reader/view options (#2958) * Add aggregation pkg and options * Update documentation for the aggregation pkg * Test Aggregation.Err * Fix aggregation pkg comment * Add WithAggregation comment * Add default aggregation * Rename WithAggregation and add AggregationSelector * Fix DefaultAggregationSelector use and decl * Replace Aggregation struct with iface * Add Copy method to hist and fix Err method * Additional test for monotonic bounds * Add aggregation method to Reader * Use AggregationSelector instead of inline func type * Switch RecordMinMax to NoMinMax * Deep copy and validate in options * Test the DefaultAggregationSelector * nolint for import-shadow of method * Fix Default aggregation comment * Test the explicit bucket histogram deep copy * Update temporality selector option (#2967) Match the WithAggregationSelector option pattern: define a TemporalitySelector type, export the DefaultTemporalitySelector function, and name the option with a Selector suffix. 
* Minor NewMeterProvider and producer docs fix (#2983) * Add internal package structure for aggregation (#2954) * Add the aggtor package * Restrict to Go 1.18 * Add missing build block to view_test.go * Comment Aggregator iface * Use Go 1.18 as the default ci version * Update Aggregator iface from feedback * Accept hist conf * Flatten aggtor into just internal * Add Cycler interface Separate the duties of aggregation and maintaining state across aggregation periods. * Remove build flags for doc.go * Clarify Cycler documentation * Remove aggregation fold logic * Rename Number to Atomic * Add tests for Atomic impls * Remove unneeded Atomic implementation Add back when filling in structures. * Fix article in Float64 docs * Remove Atomic This is an implementation detail. * Add aggregator_example_test * Fix hist example * Add issue numbers to all TODO and FIXME * Remove zero parameter comment * Combine the cycler into the aggregators * Remove the drop aggregator * Fix lint * Use attribute.Set instead of ptr to it Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Merge main into new_sdk/main (#2996) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. 
* Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows to drop the charsToString helper which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser <tklauser@distanz.ch> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. 
It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol <btopol@us.ibm.com> * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update based on feedback * Revert change Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge. 
test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Fix merge of CHANGELOG.md Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tobias Klauser <tobias.klauser@gmail.com> Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol <btopol@us.ibm.com> Co-authored-by: Craig Pastro <pastro.craig@gmail.com> Co-authored-by: Kshitija Murudi <kmurudi@ncsu.edu> Co-authored-by: Petrie Liu <lpfvip2008@gmail.com> Co-authored-by: Guangya Liu <gyliu513@gmail.com> Co-authored-by: Craig Pastro <craig.pastro@auth0.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: ttoad <qq530901331@outlook.com> * Add structure to the export data. (#2961) * Add structure to the export data. * Fix comments. * Apply suggestions from code review Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Address PR comments. * Updated optional histogram parameters. * Address PR comments. 
Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use export.Aggregation instead of internal.Aggregation (#3007) * Use export.Aggregation instead of internal * Return an export.Aggregation instead of a slice * Use attribute Sets instead of KeyValues for export data (#3012) Attribute Sets have stronger guarantees about the uniqueness of their keys and more functionality. We already ensure attributes are stored as Sets by the aggregator which will produce these data types. Instead of converting to a KeyValue slice, keep the data as a Set. Any user of the data can always call the ToSlice method to use the data as a slice of KeyValues. * Change Instrument Library to Instrument Scope (#3016) Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * move temporality to export/temporality (#3017) * move temporality to export/temporality * fix lint errors Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Rename Package sdk/metric/export into sdk/metric/metricdata (#3014) * fix unrelated changes * fix quote code * fix format * rebase pr * rebase pr * change usage of export to metricdata * Add metricdatatest package (#3025) * Use export.Aggregation instead of internal * Return an export.Aggregation instead of a slice * Use attribute Sets instead of KeyValues for export data Attribute Sets have stronger guarantees about the uniqueness of their keys and more functionality. We already ensure attributes are stored as Sets by the aggregator which will produce these data types. Instead of converting to a KeyValue slice, keep the data as a Set. Any user of the data can always call the ToSlice method to use the data as a slice of KeyValues. 
* Add export data type comparison testing API * Add Aggregation and Value comparison funcs * Move export testing to own pkg * Move exporttest to metricdatatest * Add licenses headers to files missing them * Use metricdata instead of export Fix merge of new_sdk/main * Rename exporttest pkg to metricdatatest * Fix spelling errors * Fix lint issues * Use testing pkg to error directly Include Helper() method calls to correct the call-stack. * Fix CompareAggregations Set equal to true by default * Generalize assertions and unexport equal checks * Abstract assert tests * Rename all exp var to r * Test AssertAggregationsEqual * Comment why Value and Aggregation are separate * Test AssertValuesEqual * Revert changes to metricdata/temporality.go * Expand pkg doc sentence * Add license header to assertion.go * Update assertion docs * Consolidate comparisons funcs into one file * Consolidate and fix docs * Consolidate assertion.go * Consolidate comparisons.go * make lint * Test with relatively static times * Update sdk/metric/metricdata/metricdatatest/comparisons.go Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Drop equal return from comparison funcs * Refactor AssertEqual * Remove reasN from testDatatype func params * Consolidate AssertEqual type conversions * Fix assertion error message * Add assertion failure tests * Remove unneeded strings join * Make comment include a possessive Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Adds a pipeline for creating reader's output (#3026) * Adds a pipeline for creating reader's output * fix metricdata move * fix lint * Apply suggestions from code review Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Address PR comments * Added resource test Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Use generic Sum, Gauge, and DataPoint value removing Value, Int64, and Float64 from metricdata (#3036) * Use generic DataPoint value * Fix 
assertion_fail_test.go * Declare Sum and DataPoints type in pipeline_test * Add MatchInstrumentKind filter for Views. (#3037) * Move InstrumentKind to view, Add view filter * remove TODO * Add the Option function, fix lint * use local var over 0 * Fix missing undefinedInstrument Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Change View Attribute Filter to detect if not set. (#3039) * Change View Attribute Filter to detect if not set. * Fix PR comments. * Rework test for no filter logic. * Add implementation of last-value aggregator (#3008) * Add last-value aggregator * Add test of last-value reset of unseen attrs * Add benchmark * Use generic DataPoint value * Fix assertion_fail_test.go * Fix tests * Remove unused test increment values * View.New() miss InstrumentKind check (#3043) Signed-off-by: liupengfei <lpfvip2008@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Add delta/cumulative histogram implementation (#3045) * Add delta/cumulative histogram implementation * Add histogram unit tests * Fix histValues Aggregate Store the new buckets value back to the values map. Ensure min/max are measured values, not zero values. * Fix lint * Add benchmarks * Test histograms internal functionality * Fix lint * Add TODO to look at memory use for cumu hist * Update sdk/metric/internal/histogram.go Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * use TemporalitySelector (#3050) Signed-off-by: Petrie <lpfvip2008@gmail.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add implementation of Sum aggregators (#3000) * Implement the sum aggregators * Add unit tests for delta/cumulative sums * Add benchmarks * Merge sum tests into one * Remove unused start time from cumulative sum * Refactor benchmark tests Split benchmarks for the Aggregations and Aggregate methods so computational resource use can be determined. 
* goimports * Move timestamp out of lock * Refactor testing * Fix spelling mistake * Name param of expectFunc * Reset delta sum to zero instead of delete * Revert to deleting unused attr sets * Refactor testing to allow use across other aggs * Add TODO to bound cumulative sum mem usage * Fix misspelling * Unify aggregator benchmark code in aggregator_test * Use generic DataPoint value * Fix assertion_fail_test.go * Use generic metricdata types * Fix tests * Fix benchmarks * Fix lint * Update sum documentation * Remove leftover encapsulating test run * Use t.Cleanup to mock time * Consolidate expecter logic into funcs * Move errNegVal closer to use * Run the agg test * Add tests for monotonic sum Aggregate err * Run make lint * Make monotonic an arg of creation funcs * Remove Aggregate monotonic validation * Rename sum to valueMap The term sum is a good variable name that we do not want to take and valueMap better describes the type as the storage of the aggregator. * Adds a filter Aggregator. (#3040) * Adds a filter Aggregator. * Add lock and tests * Add Concurrency tests * fix lint errors * Add memory constrained todo. * Update filter comment. Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Add back the stdoutmetric exporter (#3057) * PoC stdoutmetric exporter * Use stringer to generate String for Temporality * Add vanity imports * Update Temporality string expected output * Do not return error from newConfig * Add shutdown unit tests * Fix spelling error * Unify testing of ctx errors and test ForceFlush * Add unit test for Export handle of ctx errs * Clarify documentation about alt OTLP exporter * Remove unused ErrUnrecognized A third party encoder can produce their own errors. This code does nothing unique with this error, therefore, it is removed. 
* Lint exporter_test.go * Refactor example_test.go removing FIXME * Add test for Export shutdown err * Add a discard encoder for testing * Acknowledged error is returned from Shutdown * Remove unexpected SchemaURL from stdouttrace test * Remove unneeded *testing.T arg from testEncoderOption * Fix the location of now * Revise and edit docs Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Remove stale TODO from metricdata/data.go (#3064) * Merge main into new_sdk/main (#3082) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. * Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to 
convert Utsname []byte fields to strings. This also allows to drop the charsToString helper which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser <tklauser@distanz.ch> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol <btopol@us.ibm.com> * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update based on feedback * Revert change Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Feat/bridge support text map 
(#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge. test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update go-build-tools Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson 
<3766680+MadVikingGod@users.noreply.github.com> * Add http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix for reviews Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * add changelog entry Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows to describe a change where a metric is converted to several other metrics by eliminating an attribute. An example of such change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Sam Xie <sam@samxie.me> * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go. 
Signed-off-by: Brad Topol <btopol@us.ibm.com> Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tobias Klauser <tobias.klauser@gmail.com> Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol <btopol@us.ibm.com> Co-authored-by: Craig Pastro <pastro.craig@gmail.com> Co-authored-by: Kshitija Murudi <kmurudi@ncsu.edu> Co-authored-by: Petrie Liu <lpfvip2008@gmail.com> Co-authored-by: Guangya Liu <gyliu513@gmail.com> Co-authored-by: Craig Pastro <craig.pastro@auth0.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: ttoad <qq530901331@outlook.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen <haavard.ae@gmail.com> Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie <sam@samxie.me> * Adds the option to ignore timestamps in metric data tests. (#3076) * Adds the option to ignore timestamps in metric data tests * use config over bool Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Adds a pipelineRegistry to manage creating aggregators. (#3044) * Adds a pipelineRegistry to manage creating aggregators. * Made pipeline generic * Add aggregation filter to the registry. Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Remove stale TODO (#3083) The aggregation transform function was added in #2958. * Add back the otlpmetric transforms (#3065) * Add otlpmetric transforms * Split aggregation transforms to own file * Rename Iterator to AttrIter * Update pkg docs These are internal docs use developer based language. 
* Document all exported funcs * Unify metricdata type transforms into one file * Rename metrics.go to metricdata.go * Copy back attribute tests * Copy back in Iterator test * Refactor attribute tests * Add tests for metricdata transforms * Add multiErr support for digestible transform errs * Test transform errors * go mod tidy * Use key field * goimported * gofmt-ed * Fix error documentation * go mod tidy * Changes instruments uniqueness in pipeline. (#3071) * Changes instruments uniqueness in pipeline. * Fix lint * Update sdk/metric/pipeline.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Restore the exporters/otlp/otlpmetric/internal/otelconfig package (#3090) * Restore otlpmetric/otlpconfig from main * Rename otlpconfig to oconf * Remove the empty envconfig_test.go * Update import of otlpconfig in oconf_test * go mod tidy * Run make * add internal OpenCensus metric translation library (#3099) * reintroduce opencensus trace bridge (#3098) Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Document the sdk/metric/view package (#3086) * Add package documentation for sdk/metric/view * Refer to views not configs in WithReader docs * Fix vanity url for view_test.go * Add example tests for view options * Add package example * Fix view type docs * Remove build constraint for doc.go * Fix lint * Adds async instruments and providers. (#3084) * Adds instrument providers and instruments. * Don't return nil instrument, return with error * removed sync * Added a number of tests. 
Signed-off-by: GitHub <noreply@github.com> * Address PR comments * fix error messages * fixes typo in test name Signed-off-by: GitHub <noreply@github.com> * Fix lint issues * moved the testCallback into the TestMeterCreateInstrument Signed-off-by: GitHub <noreply@github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Merge branch 'main' into new_sdk/main (#3111) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. * Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. 
This also allows to drop the charsToString helper which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser <tklauser@distanz.ch> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol <btopol@us.ibm.com> * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update based on feedback * Revert change Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add 
comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge. test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update go-build-tools Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add 
http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix for reviews Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * add changelog entry Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows to describe a change where a metric is converted to several other metrics by eliminating an attribute. An example of such change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Sam Xie <sam@samxie.me> * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go. 
* Fix `opentracing.Bridge` where it miss identifying the spanKind (#3096) * Fix opentracing.Bridge where it was not identifying the spanKinf correctly * fix test * changelog * Keeping backward comppatibillity * Update CHANGELOG.md Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Update CHANGELOG.md Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * replace `required` by `requirementlevel` (#3103) * Change the inclusivity of exponential histogram bounds (#2982) * Use lower-inclusive boundaries * make exponent and logarithm more symmetric Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update golangci-lint to v1.48.0 (#3105) * Update golangci-lint to v1.48.0 Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Signed-off-by: Brad Topol <btopol@us.ibm.com> Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tobias Klauser <tobias.klauser@gmail.com> Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol <btopol@us.ibm.com> Co-authored-by: Craig Pastro <pastro.craig@gmail.com> Co-authored-by: Kshitija Murudi <kmurudi@ncsu.edu> Co-authored-by: Petrie Liu <lpfvip2008@gmail.com> Co-authored-by: Guangya Liu <gyliu513@gmail.com> Co-authored-by: Craig Pastro <craig.pastro@auth0.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: ttoad <qq530901331@outlook.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tigran Najaryan 
<4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen <haavard.ae@gmail.com> Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Alan Protasio <alanprot@gmail.com> Co-authored-by: Joshua MacDonald <jmacd@users.noreply.github.com> * Add otlpmetric exporter (#3089) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Merge transform and upload errors * Fix ineffectual increment * Make pipelineRegistry non-generic (#3115) * Make pipelineRegistry non-generic * Add Synchronous instruments (#3124) * Add Synchronous instruments * remove duplicate code in instrument * Fixes to Histogram comments * Add back the otlpmetricgrpc exporter (#3094) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Add the otlpmetricgrpc Go module * Restore otlpmetricgrpc from main * Remove integration testing from otlpmetricgrpc * Fix import of otlpconfig to oconf * Update client Add ForceFlush method to satisfy otlpmetric.Client, unexport Start, and restructure NewClient to return a started client. * Update otlpmetricgrpc New functions Remove NewUnstarted and only export New. * Remove unneeded client sync The exporter handle the synchronization of client method calls. * Update example_test.go * Update client_unit_test.go * Rename client_unit_test.go to client_test.go * Rename options.go to config.go * Add package doc * Unify exporter.go and doc.go into client.go * Unexport NewClient * Correct option documentation * Add env config documentation * go mod tidy * Restrict build to Go 1.18 * Update client.go Fix copied UploadMetrics documentation. 
* Run make * Close client conn even if context deadline reached * Add sdk/metric Go pkg docs and example (#3139) * Add sdk/metric Go pkg docs * Add example_test.go * Add Go 1.18 build guard to example_test.go * Merge main into new_sdk/main (#3141) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. * Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows to drop the charsToString helper which serves the same purpose and matches ByteSliceToString's implementation. 
Signed-off-by: Tobias Klauser <tklauser@distanz.ch> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol <btopol@us.ibm.com> * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update based on feedback * Revert change Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge. 
test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update go-build-tools Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: 
Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix lint Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * fix for reviews Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * add changelog entry Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * Update CHANGELOG.md Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows to describe a change where a metric is converted to several other metrics by eliminating an attribute. An example of such change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Sam Xie <sam@samxie.me> * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go. 
* Fix `opentracing.Bridge` where it miss identifying the spanKind (#3096) * Fix opentracing.Bridge where it was not identifying the spanKinf correctly * fix test * changelog * Keeping backward comppatibillity * Update CHANGELOG.md Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> * Update CHANGELOG.md Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * replace `required` by `requirementlevel` (#3103) * Change the inclusivity of exponential histogram bounds (#2982) * Use lower-inclusive boundaries * make exponent and logarithm more symmetric Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update golangci-lint to v1.48.0 (#3105) * Update golangci-lint to v1.48.0 Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update tracer to guard for a nil ctx (#3110) * Update tracer to guard for a nil ctx Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Tyler Yahn <MrAlias@users.noreply.github.com> * Fix sdk/instrumentation pkg docs (#3130) * Add instrumentation scope attributes (#3131) * Add WithScopeAttributes TracerOption to trace API * Add Attributes field to instrumentation Scope * Use scope attributes for new Tracer * Fix stdouttrace expected test output * Allow unexported Set fields in sdk/trace test * Export instrumentation scope attrs in OTLP * Add changes to the changelog * Fix imports with make lint * Add unit tests for WithScopeAttributes * Fix English in Scope documentation * Add WithScopeAttributes MeterOption to metric API package (#3132) * Add WithScopeAttributes MeterOption to metric pkg * Add 
MeterConfig unit tests * Add changes to changelog * Fix import linting * Update MeterProvider documentation Include information about how to use WithScopeAttributes. * Refactor TracerProvider documentation (#3133) * Refactor TracerProvider documentation * Fix English article * Grammar fixes * consistency-of: Changed signal names for website docs (#3137) * Shut down all processors even on error (#3091) * Fix stdoutmetric example test The merged instrumentation Scope includes SchemaURL and Attributes now, add them to the expected output. Signed-off-by: Brad Topol <btopol@us.ibm.com> Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Tobias Klauser <tobias.klauser@gmail.com> Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol <btopol@us.ibm.com> Co-authored-by: Craig Pastro <pastro.craig@gmail.com> Co-authored-by: Kshitija Murudi <kmurudi@ncsu.edu> Co-authored-by: Petrie Liu <lpfvip2008@gmail.com> Co-authored-by: Guangya Liu <gyliu513@gmail.com> Co-authored-by: Craig Pastro <craig.pastro@auth0.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: ttoad <qq530901331@outlook.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen <haavard.ae@gmail.com> Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: Alan Protasio <alanprot@gmail.com> Co-authored-by: Joshua MacDonald <jmacd@users.noreply.github.com> Co-authored-by: Mitch Usher <cull.methi@gmail.com> Co-authored-by: Gaurang Patel <mr.patelgaurang@gmail.com> * Remove empty metrictest pkg (#3148) * Add 
exporters/otlp/otlpmetric/internal/otest (#3125) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Restore otlpmetrictest from main * Rename otlpmetrictest to otest * Remove data.go The functions and types it contains are no longer relevant to the SDK. * Update client context error tests Remove multiple shutdown tests. The Client interface states this should never happen. * Remove collector.go and otlptest.go * Expand client tests with ctx and force-flush * Add UploadMetrics tests * Test the tests with a trivial client * Condense all to client.go * Example of how to run RunClientTests * Add client integration testing * Add GRPCCollector * Remove GRPCCollector to limit scope of PR * Add back the otlpmetrichttp exporter (#3097) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Add back the otlpmetrichttp pkg from main * Restrict to Go 1.18 and above * Remove integration testing * Rename client_unit_test.go to client_test.go * Rename options.go to config.go * Remove the NewUnstarted func * Remove Start method from client * Add no-op ForceFlush method to client * Update otlpconfig pkg name to oconf * Rename Stop method to Shutdown Match the otlpmetric.Client interface. * Update creation functions to compile * Remove name field from client * Remove sync of methods from client This is handled by the exporter. 
* Remove unused generalCfg field from client * Replace cfg client field with used conf vals * Use a http request instead of url/header fields * Remove NewClient and move New into client.go * Rename client.client field to client.httpClient * Update client tests Remove test of a retry config and add functional tests of the client methods honoring a context. * Remove deprecated WithMaxAttempts and WithBackoff * Update option docs Include info on envvars. * Fix lint * Fix lint errors * Revert New to accept a context * Add example test * Update pkg docs * go mod tidy * Use url.URL to form HTTP request URL * Remove stale TODO in sdk/view (#3149) Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Use unique metric testing data in reader_test (#3151) Address unresolved TODO. Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> * Add new metric SDK changes to changelog (#3150) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add integration and config testing to otlpmetricgrpc (#3126) * Add the GRPCCollector to otest * Use otest to test otlpmetricgrpc Client * Add WithHeaders and WithTimeout tests * Add integration and config testing to otlpmetrichttp (#3155) * Add HTTPCollector to otest * Add integration testing for otlpmetrichttp * Fix NewHTTPCollector docs * Add config tests * Fix lint * Add WithURLPath test * Add WithTLSClientConfig test * Ignore depguard for crypto/x509/pkix This is a testing package that uses the package to generate a weak testing TLS certificate. 
* Add Prometheus exporter code (#3135) * Add Prometheus exporter example (#3168) * Add back prom exporter to README.md * Fix removal changes from #3154 in API * Update CHANGELOG with PR number Signed-off-by: Brad Topol <btopol@us.ibm.com> Signed-off-by: Anthony J Mirabella <a9@aneurysm9.com> Signed-off-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Signed-off-by: GitHub <noreply@github.com> Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung <cheung.zhy.csu@gmail.com> Co-authored-by: Anthony Mirabella <a9@aneurysm9.com> Co-authored-by: Tobias Klauser <tobias.klauser@gmail.com> Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol <btopol@us.ibm.com> Co-authored-by: Craig Pastro <pastro.craig@gmail.com> Co-authored-by: Kshitija Murudi <kmurudi@ncsu.edu> Co-authored-by: Petrie Liu <lpfvip2008@gmail.com> Co-authored-by: Guangya Liu <gyliu513@gmail.com> Co-authored-by: Craig Pastro <craig.pastro@auth0.com> Co-authored-by: ttoad <qq530901331@outlook.com> Co-authored-by: Ziqi Zhao <zhaoziqi9146@gmail.com> Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen <haavard.ae@gmail.com> Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie <sam@samxie.me> Co-authored-by: David Ashpole <dashpole@google.com> Co-authored-by: Alan Protasio <alanprot@gmail.com> Co-authored-by: Joshua MacDonald <jmacd@users.noreply.github.com> Co-authored-by: Mitch Usher <cull.methi@gmail.com> Co-authored-by: Gaurang Patel <mr.patelgaurang@gmail.com> Co-authored-by: Mike Dame <mikedame@google.com>
This commit is contained in:
18
.github/dependabot.yml
vendored
18
.github/dependabot.yml
vendored
@@ -28,6 +28,15 @@ updates:
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: sunday
|
||||
- package-ecosystem: gomod
|
||||
directory: /bridge/opencensus/opencensusmetric
|
||||
labels:
|
||||
- dependencies
|
||||
- go
|
||||
- Skip Changelog
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: sunday
|
||||
- package-ecosystem: gomod
|
||||
directory: /bridge/opencensus/test
|
||||
labels:
|
||||
@@ -73,15 +82,6 @@ updates:
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: sunday
|
||||
- package-ecosystem: gomod
|
||||
directory: /example/opencensus
|
||||
labels:
|
||||
- dependencies
|
||||
- go
|
||||
- Skip Changelog
|
||||
schedule:
|
||||
interval: weekly
|
||||
day: sunday
|
||||
- package-ecosystem: gomod
|
||||
directory: /example/otel-collector
|
||||
labels:
|
||||
|
||||
34
CHANGELOG.md
34
CHANGELOG.md
@@ -8,6 +8,40 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
### Changed
|
||||
|
||||
- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
|
||||
Please see the package documentation for how the new SDK is initialized and configured. (#3175)
|
||||
|
||||
### Removed
|
||||
|
||||
- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
|
||||
A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
|
||||
A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
|
||||
- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
|
||||
- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
|
||||
- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
|
||||
|
||||
## [1.10.0] - 2022-09-09
|
||||
|
||||
### Added
|
||||
|
||||
@@ -79,50 +79,3 @@ OpenCensus and OpenTelemetry APIs are not entirely compatible. If the bridge fi
|
||||
* Custom OpenCensus Samplers specified during StartSpan are ignored.
|
||||
* Links cannot be added to OpenCensus spans.
|
||||
* OpenTelemetry Debug or Deferred trace flags are dropped after an OpenCensus span is created.
|
||||
|
||||
## Metrics
|
||||
|
||||
### The problem: mixing libraries without mixing pipelines
|
||||
|
||||
The problem for monitoring is simpler than the problem for tracing, since there
|
||||
are no context propagation issues to deal with. However, it still is difficult
|
||||
for users to migrate an entire application's monitoring at once. It
|
||||
should be possible to send metrics generated by OpenCensus libraries to an
|
||||
OpenTelemetry pipeline so that migrating a metric does not require maintaining
|
||||
separate export pipelines for OpenCensus and OpenTelemetry.
|
||||
|
||||
### The Exporter "wrapper" solution
|
||||
|
||||
The solution we use here is to allow wrapping an OpenTelemetry exporter such
|
||||
that it implements the OpenCensus exporter interfaces. This allows a single
|
||||
exporter to be used for metrics from *both* OpenCensus and OpenTelemetry.
|
||||
|
||||
### User Journey
|
||||
|
||||
Starting from an application using entirely OpenCensus APIs:
|
||||
|
||||
1. Instantiate OpenTelemetry SDK and Exporters.
|
||||
2. Replace OpenCensus exporters with a wrapped OpenTelemetry exporter from step 1.
|
||||
3. Migrate libraries individually from OpenCensus to OpenTelemetry
|
||||
4. Remove OpenCensus Exporters and configuration.
|
||||
|
||||
For example, to swap out the OpenCensus logging exporter for the OpenTelemetry stdout exporter:
|
||||
|
||||
```go
|
||||
import (
|
||||
"go.opencensus.io/metric/metricexport"
|
||||
"go.opentelemetry.io/otel/bridge/opencensus"
|
||||
"go.opentelemetry.io/otel/exporters/stdout"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
// With OpenCensus, you could have previously configured the logging exporter like this:
|
||||
// import logexporter "go.opencensus.io/examples/exporter"
|
||||
// exporter, _ := logexporter.NewLogExporter(logexporter.Options{})
|
||||
// Instead, we can create an equivalent using the OpenTelemetry stdout exporter:
|
||||
openTelemetryExporter, _ := stdout.New(stdout.WithPrettyPrint())
|
||||
exporter := opencensus.NewMetricExporter(openTelemetryExporter)
|
||||
|
||||
// Use the wrapped OpenTelemetry exporter like you normally would with OpenCensus
|
||||
intervalReader, _ := metricexport.NewIntervalReader(&metricexport.Reader{}, exporter)
|
||||
intervalReader.Start()
|
||||
```
|
||||
|
||||
@@ -1,157 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
)
|
||||
|
||||
var (
|
||||
errIncompatibleType = errors.New("incompatible type for aggregation")
|
||||
errEmpty = errors.New("points may not be empty")
|
||||
errBadPoint = errors.New("point cannot be converted")
|
||||
)
|
||||
|
||||
type recordFunc func(agg aggregation.Aggregation, end time.Time) error
|
||||
|
||||
// recordAggregationsFromPoints records one OpenTelemetry aggregation for
|
||||
// each OpenCensus point. Points may not be empty and must be either
|
||||
// all (int|float)64 or all *metricdata.Distribution.
|
||||
func recordAggregationsFromPoints(points []metricdata.Point, recorder recordFunc) error {
|
||||
if len(points) == 0 {
|
||||
return errEmpty
|
||||
}
|
||||
switch t := points[0].Value.(type) {
|
||||
case int64:
|
||||
return recordGaugePoints(points, recorder)
|
||||
case float64:
|
||||
return recordGaugePoints(points, recorder)
|
||||
case *metricdata.Distribution:
|
||||
return recordDistributionPoint(points, recorder)
|
||||
default:
|
||||
// TODO add *metricdata.Summary support
|
||||
return fmt.Errorf("%w: %v", errIncompatibleType, t)
|
||||
}
|
||||
}
|
||||
|
||||
var _ aggregation.Aggregation = &ocRawAggregator{}
|
||||
var _ aggregation.LastValue = &ocRawAggregator{}
|
||||
|
||||
// recordGaugePoints creates an OpenTelemetry aggregation from OpenCensus points.
|
||||
// Points may not be empty, and must only contain integers or floats.
|
||||
func recordGaugePoints(pts []metricdata.Point, recorder recordFunc) error {
|
||||
for _, pt := range pts {
|
||||
switch t := pt.Value.(type) {
|
||||
case int64:
|
||||
if err := recorder(&ocRawAggregator{
|
||||
value: number.NewInt64Number(pt.Value.(int64)),
|
||||
time: pt.Time,
|
||||
}, pt.Time); err != nil {
|
||||
return err
|
||||
}
|
||||
case float64:
|
||||
if err := recorder(&ocRawAggregator{
|
||||
value: number.NewFloat64Number(pt.Value.(float64)),
|
||||
time: pt.Time,
|
||||
}, pt.Time); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("%w: %v", errIncompatibleType, t)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ocRawAggregator struct {
|
||||
value number.Number
|
||||
time time.Time
|
||||
}
|
||||
|
||||
// Kind returns the kind of aggregation this is.
|
||||
func (o *ocRawAggregator) Kind() aggregation.Kind {
|
||||
return aggregation.LastValueKind
|
||||
}
|
||||
|
||||
// LastValue returns the last point.
|
||||
func (o *ocRawAggregator) LastValue() (number.Number, time.Time, error) {
|
||||
return o.value, o.time, nil
|
||||
}
|
||||
|
||||
var _ aggregation.Aggregation = &ocDistAggregator{}
|
||||
var _ aggregation.Histogram = &ocDistAggregator{}
|
||||
|
||||
// recordDistributionPoint creates an OpenTelemetry aggregation from
|
||||
// OpenCensus points. Points may not be empty, and must only contain
|
||||
// Distributions. The most recent distribution will be used in the aggregation.
|
||||
func recordDistributionPoint(pts []metricdata.Point, recorder recordFunc) error {
|
||||
// only use the most recent datapoint for now.
|
||||
pt := pts[len(pts)-1]
|
||||
val, ok := pt.Value.(*metricdata.Distribution)
|
||||
if !ok {
|
||||
return fmt.Errorf("%w: %v", errBadPoint, pt.Value)
|
||||
}
|
||||
bucketCounts := make([]uint64, len(val.Buckets))
|
||||
for i, bucket := range val.Buckets {
|
||||
if bucket.Count < 0 {
|
||||
return fmt.Errorf("%w: bucket count may not be negative", errBadPoint)
|
||||
}
|
||||
bucketCounts[i] = uint64(bucket.Count)
|
||||
}
|
||||
if val.Count < 0 {
|
||||
return fmt.Errorf("%w: count may not be negative", errBadPoint)
|
||||
}
|
||||
return recorder(&ocDistAggregator{
|
||||
sum: number.NewFloat64Number(val.Sum),
|
||||
count: uint64(val.Count),
|
||||
buckets: aggregation.Buckets{
|
||||
Boundaries: val.BucketOptions.Bounds,
|
||||
Counts: bucketCounts,
|
||||
},
|
||||
}, pts[len(pts)-1].Time)
|
||||
}
|
||||
|
||||
type ocDistAggregator struct {
|
||||
sum number.Number
|
||||
count uint64
|
||||
buckets aggregation.Buckets
|
||||
}
|
||||
|
||||
// Kind returns the kind of aggregation this is.
|
||||
func (o *ocDistAggregator) Kind() aggregation.Kind {
|
||||
return aggregation.HistogramKind
|
||||
}
|
||||
|
||||
// Sum returns the sum of values.
|
||||
func (o *ocDistAggregator) Sum() (number.Number, error) {
|
||||
return o.sum, nil
|
||||
}
|
||||
|
||||
// Count returns the number of values.
|
||||
func (o *ocDistAggregator) Count() (uint64, error) {
|
||||
return o.count, nil
|
||||
}
|
||||
|
||||
// Histogram returns the count of events in pre-determined buckets.
|
||||
func (o *ocDistAggregator) Histogram() (aggregation.Buckets, error) {
|
||||
return o.buckets, nil
|
||||
}
|
||||
@@ -1,322 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opencensus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
)
|
||||
|
||||
func TestNewAggregationFromPoints(t *testing.T) {
|
||||
now := time.Now()
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input []metricdata.Point
|
||||
expectedKind aggregation.Kind
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "no points",
|
||||
expectedErr: errEmpty,
|
||||
},
|
||||
{
|
||||
desc: "int point",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: int64(23),
|
||||
},
|
||||
},
|
||||
expectedKind: aggregation.LastValueKind,
|
||||
},
|
||||
{
|
||||
desc: "float point",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: float64(23),
|
||||
},
|
||||
},
|
||||
expectedKind: aggregation.LastValueKind,
|
||||
},
|
||||
{
|
||||
desc: "distribution point",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedKind: aggregation.HistogramKind,
|
||||
},
|
||||
{
|
||||
desc: "bad distribution bucket count",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
// negative bucket
|
||||
{Count: -1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errBadPoint,
|
||||
},
|
||||
{
|
||||
desc: "bad distribution count",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
// negative count
|
||||
Count: -2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errBadPoint,
|
||||
},
|
||||
{
|
||||
desc: "incompatible point type bool",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: true,
|
||||
},
|
||||
},
|
||||
expectedErr: errIncompatibleType,
|
||||
},
|
||||
{
|
||||
desc: "dist is incompatible with raw points",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: int64(23),
|
||||
},
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errIncompatibleType,
|
||||
},
|
||||
{
|
||||
desc: "int point is incompatible with dist",
|
||||
input: []metricdata.Point{
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Time: now,
|
||||
Value: int64(23),
|
||||
},
|
||||
},
|
||||
expectedErr: errBadPoint,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
var output []aggregation.Aggregation
|
||||
err := recordAggregationsFromPoints(tc.input, func(agg aggregation.Aggregation, ts time.Time) error {
|
||||
last := tc.input[len(tc.input)-1]
|
||||
if ts != last.Time {
|
||||
t.Errorf("incorrect timestamp %v != %v", ts, last.Time)
|
||||
}
|
||||
output = append(output, agg)
|
||||
return nil
|
||||
})
|
||||
if !errors.Is(err, tc.expectedErr) {
|
||||
t.Errorf("newAggregationFromPoints(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr)
|
||||
}
|
||||
for _, out := range output {
|
||||
if tc.expectedErr == nil && out.Kind() != tc.expectedKind {
|
||||
t.Errorf("newAggregationFromPoints(%v) = %v, want %v", tc.input, out.Kind(), tc.expectedKind)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLastValueAggregation(t *testing.T) {
|
||||
now := time.Now()
|
||||
input := []metricdata.Point{
|
||||
{Value: int64(15), Time: now.Add(-time.Minute)},
|
||||
{Value: int64(-23), Time: now},
|
||||
}
|
||||
idx := 0
|
||||
err := recordAggregationsFromPoints(input, func(agg aggregation.Aggregation, end time.Time) error {
|
||||
if agg.Kind() != aggregation.LastValueKind {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, agg.Kind(), aggregation.LastValueKind)
|
||||
}
|
||||
if end != input[idx].Time {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, input[idx].Time)
|
||||
}
|
||||
pointsLV, ok := agg.(aggregation.LastValue)
|
||||
if !ok {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.LastValue interface", input, agg)
|
||||
}
|
||||
lv, ts, _ := pointsLV.LastValue()
|
||||
if lv.AsInt64() != input[idx].Value {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, lv.AsInt64(), input[idx].Value)
|
||||
}
|
||||
if ts != input[idx].Time {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, ts, input[idx].Time)
|
||||
}
|
||||
idx++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = unexpected error %v", input, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistogramAggregation(t *testing.T) {
|
||||
now := time.Now()
|
||||
input := []metricdata.Point{
|
||||
{
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 0,
|
||||
Sum: 0,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 0},
|
||||
{Count: 0},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Time: now,
|
||||
Value: &metricdata.Distribution{
|
||||
Count: 2,
|
||||
Sum: 55,
|
||||
BucketOptions: &metricdata.BucketOptions{
|
||||
Bounds: []float64{20, 30},
|
||||
},
|
||||
Buckets: []metricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var output aggregation.Aggregation
|
||||
var end time.Time
|
||||
err := recordAggregationsFromPoints(input, func(argAgg aggregation.Aggregation, argEnd time.Time) error {
|
||||
output = argAgg
|
||||
end = argEnd
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("recordAggregationsFromPoints(%v) = err(%v), want <nil>", input, err)
|
||||
}
|
||||
if output.Kind() != aggregation.HistogramKind {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, output.Kind(), aggregation.HistogramKind)
|
||||
}
|
||||
if !end.Equal(now) {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, now)
|
||||
}
|
||||
distAgg, ok := output.(aggregation.Histogram)
|
||||
if !ok {
|
||||
t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.Points interface", input, output)
|
||||
}
|
||||
sum, err := distAgg.Sum()
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
}
|
||||
if sum.AsFloat64() != float64(55) {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).Sum() = %v, want %v", input, sum.AsFloat64(), float64(55))
|
||||
}
|
||||
count, err := distAgg.Count()
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
}
|
||||
if count != 2 {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).Count() = %v, want %v", input, count, 2)
|
||||
}
|
||||
hist, err := distAgg.Histogram()
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected err: %v", err)
|
||||
}
|
||||
inputBucketBoundaries := []float64{20, 30}
|
||||
if len(hist.Boundaries) != len(inputBucketBoundaries) {
|
||||
t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d boundaries, want %d boundaries", input, len(hist.Boundaries), len(inputBucketBoundaries))
|
||||
}
|
||||
for i, b := range hist.Boundaries {
|
||||
if b != inputBucketBoundaries[i] {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).Histogram().Boundaries[%d] = %v, want %v", input, i, b, inputBucketBoundaries[i])
|
||||
}
|
||||
}
|
||||
inputBucketCounts := []uint64{1, 1}
|
||||
if len(hist.Counts) != len(inputBucketCounts) {
|
||||
t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d buckets, want %d buckets", input, len(hist.Counts), len(inputBucketCounts))
|
||||
}
|
||||
for i, c := range hist.Counts {
|
||||
if c != inputBucketCounts[i] {
|
||||
t.Errorf("recordAggregationsFromPoints(%v).Histogram().Counts[%d] = %d, want %d", input, i, c, inputBucketCounts[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,186 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
"go.opencensus.io/metric/metricexport"
|
||||
ocresource "go.opencensus.io/resource"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
var errConversion = errors.New("unable to convert from OpenCensus to OpenTelemetry")
|
||||
|
||||
// NewMetricExporter returns an OpenCensus exporter that exports to an
|
||||
// OpenTelemetry exporter.
|
||||
func NewMetricExporter(base export.Exporter) metricexport.Exporter {
|
||||
return &exporter{base: base}
|
||||
}
|
||||
|
||||
// exporter implements the OpenCensus metric Exporter interface using an
|
||||
// OpenTelemetry base exporter.
|
||||
type exporter struct {
|
||||
base export.Exporter
|
||||
}
|
||||
|
||||
// ExportMetrics implements the OpenCensus metric Exporter interface.
|
||||
func (e *exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error {
|
||||
res := resource.Empty()
|
||||
if len(metrics) != 0 {
|
||||
res = convertResource(metrics[0].Resource)
|
||||
}
|
||||
return e.base.Export(ctx, res, &censusLibraryReader{metrics: metrics})
|
||||
}
|
||||
|
||||
type censusLibraryReader struct {
|
||||
metrics []*metricdata.Metric
|
||||
}
|
||||
|
||||
func (r censusLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
|
||||
return readerFunc(instrumentation.Library{
|
||||
Name: "OpenCensus Bridge",
|
||||
}, &metricReader{metrics: r.metrics})
|
||||
}
|
||||
|
||||
type metricReader struct {
|
||||
// RWMutex implements locking for the `Reader` interface.
|
||||
sync.RWMutex
|
||||
metrics []*metricdata.Metric
|
||||
}
|
||||
|
||||
var _ export.Reader = &metricReader{}
|
||||
|
||||
// ForEach iterates through the metrics data, synthesizing an
|
||||
// export.Record with the appropriate aggregation for the exporter.
|
||||
func (d *metricReader) ForEach(_ aggregation.TemporalitySelector, f func(export.Record) error) error {
|
||||
for _, m := range d.metrics {
|
||||
descriptor, err := convertDescriptor(m.Descriptor)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
for _, ts := range m.TimeSeries {
|
||||
if len(ts.Points) == 0 {
|
||||
continue
|
||||
}
|
||||
attrs, err := convertAttrs(m.Descriptor.LabelKeys, ts.LabelValues)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
err = recordAggregationsFromPoints(
|
||||
ts.Points,
|
||||
func(agg aggregation.Aggregation, end time.Time) error {
|
||||
return f(export.NewRecord(
|
||||
&descriptor,
|
||||
&attrs,
|
||||
agg,
|
||||
ts.StartTime,
|
||||
end,
|
||||
))
|
||||
})
|
||||
if err != nil && !errors.Is(err, aggregation.ErrNoData) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertAttrs converts from OpenCensus attribute keys and values to an
|
||||
// OpenTelemetry attribute Set.
|
||||
func convertAttrs(keys []metricdata.LabelKey, values []metricdata.LabelValue) (attribute.Set, error) {
|
||||
if len(keys) != len(values) {
|
||||
return attribute.NewSet(), fmt.Errorf("%w different number of label keys (%d) and values (%d)", errConversion, len(keys), len(values))
|
||||
}
|
||||
attrs := []attribute.KeyValue{}
|
||||
for i, lv := range values {
|
||||
if !lv.Present {
|
||||
continue
|
||||
}
|
||||
attrs = append(attrs, attribute.KeyValue{
|
||||
Key: attribute.Key(keys[i].Key),
|
||||
Value: attribute.StringValue(lv.Value),
|
||||
})
|
||||
}
|
||||
return attribute.NewSet(attrs...), nil
|
||||
}
|
||||
|
||||
// convertResource converts an OpenCensus Resource to an OpenTelemetry Resource
|
||||
// Note: the ocresource.Resource Type field is not used.
|
||||
func convertResource(res *ocresource.Resource) *resource.Resource {
|
||||
attrs := []attribute.KeyValue{}
|
||||
if res == nil {
|
||||
return nil
|
||||
}
|
||||
for k, v := range res.Labels {
|
||||
attrs = append(attrs, attribute.KeyValue{Key: attribute.Key(k), Value: attribute.StringValue(v)})
|
||||
}
|
||||
return resource.NewSchemaless(attrs...)
|
||||
}
|
||||
|
||||
// convertDescriptor converts an OpenCensus Descriptor to an OpenTelemetry Descriptor.
|
||||
func convertDescriptor(ocDescriptor metricdata.Descriptor) (sdkapi.Descriptor, error) {
|
||||
var (
|
||||
nkind number.Kind
|
||||
ikind sdkapi.InstrumentKind
|
||||
)
|
||||
switch ocDescriptor.Type {
|
||||
case metricdata.TypeGaugeInt64:
|
||||
nkind = number.Int64Kind
|
||||
ikind = sdkapi.GaugeObserverInstrumentKind
|
||||
case metricdata.TypeGaugeFloat64:
|
||||
nkind = number.Float64Kind
|
||||
ikind = sdkapi.GaugeObserverInstrumentKind
|
||||
case metricdata.TypeCumulativeInt64:
|
||||
nkind = number.Int64Kind
|
||||
ikind = sdkapi.CounterObserverInstrumentKind
|
||||
case metricdata.TypeCumulativeFloat64:
|
||||
nkind = number.Float64Kind
|
||||
ikind = sdkapi.CounterObserverInstrumentKind
|
||||
default:
|
||||
// Includes TypeGaugeDistribution, TypeCumulativeDistribution, TypeSummary
|
||||
return sdkapi.Descriptor{}, fmt.Errorf("%w; descriptor type: %v", errConversion, ocDescriptor.Type)
|
||||
}
|
||||
opts := []instrument.Option{
|
||||
instrument.WithDescription(ocDescriptor.Description),
|
||||
}
|
||||
switch ocDescriptor.Unit {
|
||||
case metricdata.UnitDimensionless:
|
||||
opts = append(opts, instrument.WithUnit(unit.Dimensionless))
|
||||
case metricdata.UnitBytes:
|
||||
opts = append(opts, instrument.WithUnit(unit.Bytes))
|
||||
case metricdata.UnitMilliseconds:
|
||||
opts = append(opts, instrument.WithUnit(unit.Milliseconds))
|
||||
}
|
||||
cfg := instrument.NewConfig(opts...)
|
||||
return sdkapi.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil
|
||||
}
|
||||
@@ -1,475 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package opencensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
ocresource "go.opencensus.io/resource"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
type fakeExporter struct {
|
||||
export.Exporter
|
||||
records []export.Record
|
||||
resource *resource.Resource
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
|
||||
return controllertest.ReadAll(ilr, aggregation.StatelessTemporalitySelector(),
|
||||
func(_ instrumentation.Library, record export.Record) error {
|
||||
f.resource = res
|
||||
f.records = append(f.records, record)
|
||||
return f.err
|
||||
})
|
||||
}
|
||||
|
||||
type fakeErrorHandler struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeErrorHandler) Handle(err error) {
|
||||
f.err = err
|
||||
}
|
||||
|
||||
func (f *fakeErrorHandler) matches(err error) error {
|
||||
// make sure err is cleared for the next test
|
||||
defer func() { f.err = nil }()
|
||||
if !errors.Is(f.err, err) {
|
||||
return fmt.Errorf("err(%v), want err(%v)", f.err, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestExportMetrics(t *testing.T) {
|
||||
now := time.Now()
|
||||
basicDesc := metrictest.NewDescriptor(
|
||||
"",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
)
|
||||
fakeErrorHandler := &fakeErrorHandler{}
|
||||
otel.SetErrorHandler(fakeErrorHandler)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input []*metricdata.Metric
|
||||
exportErr error
|
||||
expected []export.Record
|
||||
expectedResource *resource.Resource
|
||||
expectedHandledError error
|
||||
}{
|
||||
{
|
||||
desc: "no metrics",
|
||||
},
|
||||
{
|
||||
desc: "metric without points is dropped",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "descriptor conversion error",
|
||||
input: []*metricdata.Metric{
|
||||
// TypeGaugeDistribution isn't supported
|
||||
{Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}},
|
||||
},
|
||||
expectedHandledError: errConversion,
|
||||
},
|
||||
{
|
||||
desc: "attrs conversion error",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
// No descriptor with attribute keys.
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
// 1 attribute value, which doesn't exist in keys.
|
||||
{
|
||||
LabelValues: []metricdata.LabelValue{{Value: "foo", Present: true}},
|
||||
Points: []metricdata.Point{
|
||||
{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedHandledError: errConversion,
|
||||
},
|
||||
{
|
||||
desc: "unsupported summary point type",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
{
|
||||
Points: []metricdata.Point{
|
||||
{Value: &metricdata.Summary{}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
exportErr: errIncompatibleType,
|
||||
},
|
||||
{
|
||||
desc: "success",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
Resource: &ocresource.Resource{
|
||||
Labels: map[string]string{
|
||||
"R1": "V1",
|
||||
"R2": "V2",
|
||||
},
|
||||
},
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
{
|
||||
StartTime: now,
|
||||
Points: []metricdata.Point{
|
||||
{Value: int64(123), Time: now},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedResource: resource.NewSchemaless(
|
||||
attribute.String("R1", "V1"),
|
||||
attribute.String("R2", "V2"),
|
||||
),
|
||||
expected: []export.Record{
|
||||
export.NewRecord(
|
||||
&basicDesc,
|
||||
attribute.EmptySet(),
|
||||
&ocRawAggregator{
|
||||
value: number.NewInt64Number(123),
|
||||
time: now,
|
||||
},
|
||||
now,
|
||||
now,
|
||||
),
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "export error after success",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
{
|
||||
StartTime: now,
|
||||
Points: []metricdata.Point{
|
||||
{Value: int64(123), Time: now},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []export.Record{
|
||||
export.NewRecord(
|
||||
&basicDesc,
|
||||
attribute.EmptySet(),
|
||||
&ocRawAggregator{
|
||||
value: number.NewInt64Number(123),
|
||||
time: now,
|
||||
},
|
||||
now,
|
||||
now,
|
||||
),
|
||||
},
|
||||
exportErr: errors.New("failed to export"),
|
||||
},
|
||||
{
|
||||
desc: "partial success sends correct metrics and drops incorrect metrics with handled err",
|
||||
input: []*metricdata.Metric{
|
||||
{
|
||||
TimeSeries: []*metricdata.TimeSeries{
|
||||
{
|
||||
StartTime: now,
|
||||
Points: []metricdata.Point{
|
||||
{Value: int64(123), Time: now},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// TypeGaugeDistribution isn't supported
|
||||
{Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}},
|
||||
},
|
||||
expected: []export.Record{
|
||||
export.NewRecord(
|
||||
&basicDesc,
|
||||
attribute.EmptySet(),
|
||||
&ocRawAggregator{
|
||||
value: number.NewInt64Number(123),
|
||||
time: now,
|
||||
},
|
||||
now,
|
||||
now,
|
||||
),
|
||||
},
|
||||
expectedHandledError: errConversion,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
fakeExporter := &fakeExporter{err: tc.exportErr}
|
||||
err := NewMetricExporter(fakeExporter).ExportMetrics(context.Background(), tc.input)
|
||||
if !errors.Is(err, tc.exportErr) {
|
||||
t.Errorf("NewMetricExporter(%+v) = err(%v), want err(%v)", tc.input, err, tc.exportErr)
|
||||
}
|
||||
// Check the global error handler, since we don't return errors
|
||||
// which occur during conversion.
|
||||
err = fakeErrorHandler.matches(tc.expectedHandledError)
|
||||
if err != nil {
|
||||
t.Fatalf("ExportMetrics(%+v) = %v", tc.input, err)
|
||||
}
|
||||
output := fakeExporter.records
|
||||
if len(tc.expected) != len(output) {
|
||||
t.Fatalf("ExportMetrics(%+v) = %d records, want %d records", tc.input, len(output), len(tc.expected))
|
||||
}
|
||||
if fakeExporter.resource.String() != tc.expectedResource.String() {
|
||||
t.Errorf("ExportMetrics(%+v)[i].Resource() = %+v, want %+v", tc.input, fakeExporter.resource.String(), tc.expectedResource.String())
|
||||
}
|
||||
for i, expected := range tc.expected {
|
||||
if output[i].StartTime() != expected.StartTime() {
|
||||
t.Errorf("ExportMetrics(%+v)[i].StartTime() = %+v, want %+v", tc.input, output[i].StartTime(), expected.StartTime())
|
||||
}
|
||||
if output[i].EndTime() != expected.EndTime() {
|
||||
t.Errorf("ExportMetrics(%+v)[i].EndTime() = %+v, want %+v", tc.input, output[i].EndTime(), expected.EndTime())
|
||||
}
|
||||
if output[i].Descriptor().Name() != expected.Descriptor().Name() {
|
||||
t.Errorf("ExportMetrics(%+v)[i].Descriptor() = %+v, want %+v", tc.input, output[i].Descriptor().Name(), expected.Descriptor().Name())
|
||||
}
|
||||
// Don't bother with a complete check of the descriptor.
|
||||
// That is checked as part of descriptor conversion tests below.
|
||||
if !output[i].Attributes().Equals(expected.Attributes()) {
|
||||
t.Errorf("ExportMetrics(%+v)[i].Attributes() = %+v, want %+v", tc.input, output[i].Attributes(), expected.Attributes())
|
||||
}
|
||||
if output[i].Aggregation().Kind() != expected.Aggregation().Kind() {
|
||||
t.Errorf("ExportMetrics(%+v)[i].Aggregation() = %+v, want %+v", tc.input, output[i].Aggregation().Kind(), expected.Aggregation().Kind())
|
||||
}
|
||||
// Don't bother checking the contents of the points aggregation.
|
||||
// Those tests are done with the aggregations themselves
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertAttributes(t *testing.T) {
|
||||
setWithMultipleKeys := attribute.NewSet(
|
||||
attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")},
|
||||
attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")},
|
||||
)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
inputKeys []metricdata.LabelKey
|
||||
inputValues []metricdata.LabelValue
|
||||
expected *attribute.Set
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "no attributes",
|
||||
expected: attribute.EmptySet(),
|
||||
},
|
||||
{
|
||||
desc: "different numbers of keys and values",
|
||||
inputKeys: []metricdata.LabelKey{{Key: "foo"}},
|
||||
expected: attribute.EmptySet(),
|
||||
expectedErr: errConversion,
|
||||
},
|
||||
{
|
||||
desc: "multiple keys and values",
|
||||
inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}},
|
||||
inputValues: []metricdata.LabelValue{
|
||||
{Value: "1", Present: true},
|
||||
{Value: "2", Present: true},
|
||||
},
|
||||
expected: &setWithMultipleKeys,
|
||||
},
|
||||
{
|
||||
desc: "multiple keys and values with some not present",
|
||||
inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}},
|
||||
inputValues: []metricdata.LabelValue{
|
||||
{Value: "1", Present: true},
|
||||
{Value: "2", Present: true},
|
||||
{Present: false},
|
||||
},
|
||||
expected: &setWithMultipleKeys,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output, err := convertAttrs(tc.inputKeys, tc.inputValues)
|
||||
if !errors.Is(err, tc.expectedErr) {
|
||||
t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr)
|
||||
}
|
||||
if !output.Equals(tc.expected) {
|
||||
t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestConvertResource(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input *ocresource.Resource
|
||||
expected *resource.Resource
|
||||
}{
|
||||
{
|
||||
desc: "nil resource",
|
||||
},
|
||||
{
|
||||
desc: "empty resource",
|
||||
input: &ocresource.Resource{
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
expected: resource.NewSchemaless(),
|
||||
},
|
||||
{
|
||||
desc: "resource with attributes",
|
||||
input: &ocresource.Resource{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
"tick": "tock",
|
||||
},
|
||||
},
|
||||
expected: resource.NewSchemaless(
|
||||
attribute.KeyValue{Key: attribute.Key("foo"), Value: attribute.StringValue("bar")},
|
||||
attribute.KeyValue{Key: attribute.Key("tick"), Value: attribute.StringValue("tock")},
|
||||
),
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output := convertResource(tc.input)
|
||||
if !output.Equal(tc.expected) {
|
||||
t.Errorf("convertResource(%v) = %+v, want %+v", tc.input, output, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestConvertDescriptor(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input metricdata.Descriptor
|
||||
expected sdkapi.Descriptor
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "empty descriptor",
|
||||
expected: metrictest.NewDescriptor(
|
||||
"",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "gauge int64 bytes",
|
||||
input: metricdata.Descriptor{
|
||||
Name: "foo",
|
||||
Description: "bar",
|
||||
Unit: metricdata.UnitBytes,
|
||||
Type: metricdata.TypeGaugeInt64,
|
||||
},
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
instrument.WithDescription("bar"),
|
||||
instrument.WithUnit(unit.Bytes),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "gauge float64 ms",
|
||||
input: metricdata.Descriptor{
|
||||
Name: "foo",
|
||||
Description: "bar",
|
||||
Unit: metricdata.UnitMilliseconds,
|
||||
Type: metricdata.TypeGaugeFloat64,
|
||||
},
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.GaugeObserverInstrumentKind,
|
||||
number.Float64Kind,
|
||||
instrument.WithDescription("bar"),
|
||||
instrument.WithUnit(unit.Milliseconds),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "cumulative int64 dimensionless",
|
||||
input: metricdata.Descriptor{
|
||||
Name: "foo",
|
||||
Description: "bar",
|
||||
Unit: metricdata.UnitDimensionless,
|
||||
Type: metricdata.TypeCumulativeInt64,
|
||||
},
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterObserverInstrumentKind,
|
||||
number.Int64Kind,
|
||||
instrument.WithDescription("bar"),
|
||||
instrument.WithUnit(unit.Dimensionless),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "cumulative float64 dimensionless",
|
||||
input: metricdata.Descriptor{
|
||||
Name: "foo",
|
||||
Description: "bar",
|
||||
Unit: metricdata.UnitDimensionless,
|
||||
Type: metricdata.TypeCumulativeFloat64,
|
||||
},
|
||||
expected: metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterObserverInstrumentKind,
|
||||
number.Float64Kind,
|
||||
instrument.WithDescription("bar"),
|
||||
instrument.WithUnit(unit.Dimensionless),
|
||||
),
|
||||
},
|
||||
{
|
||||
desc: "incompatible TypeCumulativeDistribution",
|
||||
input: metricdata.Descriptor{
|
||||
Name: "foo",
|
||||
Description: "bar",
|
||||
Type: metricdata.TypeCumulativeDistribution,
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output, err := convertDescriptor(tc.input)
|
||||
if !errors.Is(err, tc.expectedErr) {
|
||||
t.Errorf("convertDescriptor(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr)
|
||||
}
|
||||
if output != tc.expected {
|
||||
t.Errorf("convertDescriptor(%v) = %+v, want %+v", tc.input, output, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -5,26 +5,15 @@ go 1.17
|
||||
require (
|
||||
go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
go.opentelemetry.io/otel/trace v1.10.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/benbjohnson/clock v1.3.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel => ../..
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../trace
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -42,8 +40,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// The metrictest package is a collection of tools used to make testing parts of
|
||||
// the SDK easier.
|
||||
|
||||
package metrictest // import "go.opentelemetry.io/otel/sdk/metric/metrictest"
|
||||
/*
|
||||
Package opencensusmetric provides a metric bridge from OpenCensus to OpenTelemetry.
|
||||
*/
|
||||
package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric"
|
||||
28
bridge/opencensus/opencensusmetric/go.mod
Normal file
28
bridge/opencensus/opencensusmetric/go.mod
Normal file
@@ -0,0 +1,28 @@
|
||||
module go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric
|
||||
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
go.opencensus.io v0.23.0
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel => ../../..
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../../trace
|
||||
98
bridge/opencensus/opencensusmetric/go.sum
Normal file
98
bridge/opencensus/opencensusmetric/go.sum
Normal file
@@ -0,0 +1,98 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
219
bridge/opencensus/opencensusmetric/internal/metric.go
Normal file
219
bridge/opencensus/opencensusmetric/internal/metric.go
Normal file
@@ -0,0 +1,219 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
ocmetricdata "go.opencensus.io/metric/metricdata"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// Sentinel errors returned (usually wrapped with fmt.Errorf and %w) by the
// conversion functions in this package. Callers can match them with
// errors.Is.
var (
	errConversion                   = errors.New("converting from OpenCensus to OpenTelemetry")
	errAggregationType              = errors.New("unsupported OpenCensus aggregation type")
	errMismatchedValueTypes         = errors.New("wrong value type for data point")
	errNumberDataPoint              = errors.New("converting a number data point")
	errHistogramDataPoint           = errors.New("converting a histogram data point")
	errNegativeDistributionCount    = errors.New("distribution count is negative")
	errNegativeBucketCount          = errors.New("distribution bucket count is negative")
	errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values")
)
|
||||
|
||||
// ConvertMetrics converts metric data from OpenCensus to OpenTelemetry.
|
||||
func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, error) {
|
||||
otelMetrics := make([]metricdata.Metrics, 0, len(ocmetrics))
|
||||
var errInfo []string
|
||||
for _, ocm := range ocmetrics {
|
||||
if ocm == nil {
|
||||
continue
|
||||
}
|
||||
agg, err := convertAggregation(ocm)
|
||||
if err != nil {
|
||||
errInfo = append(errInfo, err.Error())
|
||||
continue
|
||||
}
|
||||
otelMetrics = append(otelMetrics, metricdata.Metrics{
|
||||
Name: ocm.Descriptor.Name,
|
||||
Description: ocm.Descriptor.Description,
|
||||
Unit: convertUnit(ocm.Descriptor.Unit),
|
||||
Data: agg,
|
||||
})
|
||||
}
|
||||
var aggregatedError error
|
||||
if len(errInfo) > 0 {
|
||||
aggregatedError = fmt.Errorf("%w: %q", errConversion, errInfo)
|
||||
}
|
||||
return otelMetrics, aggregatedError
|
||||
}
|
||||
|
||||
// convertAggregation produces an aggregation based on the OpenCensus Metric.
|
||||
func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, error) {
|
||||
labelKeys := metric.Descriptor.LabelKeys
|
||||
switch metric.Descriptor.Type {
|
||||
case ocmetricdata.TypeGaugeInt64:
|
||||
return convertGauge[int64](labelKeys, metric.TimeSeries)
|
||||
case ocmetricdata.TypeGaugeFloat64:
|
||||
return convertGauge[float64](labelKeys, metric.TimeSeries)
|
||||
case ocmetricdata.TypeCumulativeInt64:
|
||||
return convertSum[int64](labelKeys, metric.TimeSeries)
|
||||
case ocmetricdata.TypeCumulativeFloat64:
|
||||
return convertSum[float64](labelKeys, metric.TimeSeries)
|
||||
case ocmetricdata.TypeCumulativeDistribution:
|
||||
return convertHistogram(labelKeys, metric.TimeSeries)
|
||||
// TODO: Support summaries, once it is in the OTel data types.
|
||||
}
|
||||
return nil, fmt.Errorf("%w: %q", errAggregationType, metric.Descriptor.Type)
|
||||
}
|
||||
|
||||
// convertGauge converts an OpenCensus gauge to an OpenTelemetry gauge aggregation.
|
||||
func convertGauge[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Gauge[N], error) {
|
||||
points, err := convertNumberDataPoints[N](labelKeys, ts)
|
||||
return metricdata.Gauge[N]{DataPoints: points}, err
|
||||
}
|
||||
|
||||
// convertSum converts an OpenCensus cumulative to an OpenTelemetry sum aggregation.
|
||||
func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Sum[N], error) {
|
||||
points, err := convertNumberDataPoints[N](labelKeys, ts)
|
||||
// OpenCensus sums are always Cumulative
|
||||
return metricdata.Sum[N]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, err
|
||||
}
|
||||
|
||||
// convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints.
|
||||
func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) {
|
||||
var points []metricdata.DataPoint[N]
|
||||
var errInfo []string
|
||||
for _, t := range ts {
|
||||
attrs, err := convertAttrs(labelKeys, t.LabelValues)
|
||||
if err != nil {
|
||||
errInfo = append(errInfo, err.Error())
|
||||
continue
|
||||
}
|
||||
for _, p := range t.Points {
|
||||
v, ok := p.Value.(N)
|
||||
if !ok {
|
||||
errInfo = append(errInfo, fmt.Sprintf("%v: %q", errMismatchedValueTypes, p.Value))
|
||||
continue
|
||||
}
|
||||
points = append(points, metricdata.DataPoint[N]{
|
||||
Attributes: attrs,
|
||||
StartTime: t.StartTime,
|
||||
Time: p.Time,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
}
|
||||
var aggregatedError error
|
||||
if len(errInfo) > 0 {
|
||||
aggregatedError = fmt.Errorf("%w: %v", errNumberDataPoint, errInfo)
|
||||
}
|
||||
return points, aggregatedError
|
||||
}
|
||||
|
||||
// convertHistogram converts OpenCensus Distribution timeseries to an
|
||||
// OpenTelemetry Histogram aggregation.
|
||||
func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram, error) {
|
||||
points := make([]metricdata.HistogramDataPoint, 0, len(ts))
|
||||
var errInfo []string
|
||||
for _, t := range ts {
|
||||
attrs, err := convertAttrs(labelKeys, t.LabelValues)
|
||||
if err != nil {
|
||||
errInfo = append(errInfo, err.Error())
|
||||
continue
|
||||
}
|
||||
for _, p := range t.Points {
|
||||
dist, ok := p.Value.(*ocmetricdata.Distribution)
|
||||
if !ok {
|
||||
errInfo = append(errInfo, fmt.Sprintf("%v: %d", errMismatchedValueTypes, p.Value))
|
||||
continue
|
||||
}
|
||||
bucketCounts, err := convertBucketCounts(dist.Buckets)
|
||||
if err != nil {
|
||||
errInfo = append(errInfo, err.Error())
|
||||
continue
|
||||
}
|
||||
if dist.Count < 0 {
|
||||
errInfo = append(errInfo, fmt.Sprintf("%v: %d", errNegativeDistributionCount, dist.Count))
|
||||
continue
|
||||
}
|
||||
// TODO: handle exemplars
|
||||
points = append(points, metricdata.HistogramDataPoint{
|
||||
Attributes: attrs,
|
||||
StartTime: t.StartTime,
|
||||
Time: p.Time,
|
||||
Count: uint64(dist.Count),
|
||||
Sum: dist.Sum,
|
||||
Bounds: dist.BucketOptions.Bounds,
|
||||
BucketCounts: bucketCounts,
|
||||
})
|
||||
}
|
||||
}
|
||||
var aggregatedError error
|
||||
if len(errInfo) > 0 {
|
||||
aggregatedError = fmt.Errorf("%w: %v", errHistogramDataPoint, errInfo)
|
||||
}
|
||||
return metricdata.Histogram{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, aggregatedError
|
||||
}
|
||||
|
||||
// convertBucketCounts converts from OpenCensus bucket counts to slice of uint64.
|
||||
func convertBucketCounts(buckets []ocmetricdata.Bucket) ([]uint64, error) {
|
||||
bucketCounts := make([]uint64, len(buckets))
|
||||
for i, bucket := range buckets {
|
||||
if bucket.Count < 0 {
|
||||
return nil, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count)
|
||||
}
|
||||
bucketCounts[i] = uint64(bucket.Count)
|
||||
}
|
||||
return bucketCounts, nil
|
||||
}
|
||||
|
||||
// convertAttrs converts from OpenCensus attribute keys and values to an
|
||||
// OpenTelemetry attribute Set.
|
||||
func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) {
|
||||
if len(keys) != len(values) {
|
||||
return attribute.NewSet(), fmt.Errorf("%w: keys(%q) values(%q)", errMismatchedAttributeKeyValues, len(keys), len(values))
|
||||
}
|
||||
attrs := []attribute.KeyValue{}
|
||||
for i, lv := range values {
|
||||
if !lv.Present {
|
||||
continue
|
||||
}
|
||||
attrs = append(attrs, attribute.KeyValue{
|
||||
Key: attribute.Key(keys[i].Key),
|
||||
Value: attribute.StringValue(lv.Value),
|
||||
})
|
||||
}
|
||||
return attribute.NewSet(attrs...), nil
|
||||
}
|
||||
|
||||
// convertUnit converts from the OpenCensus unit to OpenTelemetry unit.
|
||||
func convertUnit(u ocmetricdata.Unit) unit.Unit {
|
||||
switch u {
|
||||
case ocmetricdata.UnitDimensionless:
|
||||
return unit.Dimensionless
|
||||
case ocmetricdata.UnitBytes:
|
||||
return unit.Bytes
|
||||
case ocmetricdata.UnitMilliseconds:
|
||||
return unit.Milliseconds
|
||||
}
|
||||
return unit.Unit(string(u))
|
||||
}
|
||||
667
bridge/opencensus/opencensusmetric/internal/metric_test.go
Normal file
667
bridge/opencensus/opencensusmetric/internal/metric_test.go
Normal file
@@ -0,0 +1,667 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ocmetricdata "go.opencensus.io/metric/metricdata"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
|
||||
)
|
||||
|
||||
func TestConvertMetrics(t *testing.T) {
|
||||
endTime1 := time.Now()
|
||||
endTime2 := endTime1.Add(-time.Millisecond)
|
||||
startTime := endTime2.Add(-time.Minute)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input []*ocmetricdata.Metric
|
||||
expected []metricdata.Metrics
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "empty",
|
||||
expected: []metricdata.Metrics{},
|
||||
},
|
||||
{
|
||||
desc: "normal Histogram, gauges, and sums",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeDistribution,
|
||||
LabelKeys: []ocmetricdata.LabelKey{
|
||||
{Key: "a"},
|
||||
{Key: "b"},
|
||||
},
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
|
||||
LabelValues: []ocmetricdata.LabelValue{
|
||||
{
|
||||
Value: "hello",
|
||||
Present: true,
|
||||
}, {
|
||||
Value: "world",
|
||||
Present: true,
|
||||
},
|
||||
},
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{
|
||||
Count: 8,
|
||||
Sum: 100.0,
|
||||
BucketOptions: &ocmetricdata.BucketOptions{
|
||||
Bounds: []float64{1.0, 2.0, 3.0},
|
||||
},
|
||||
Buckets: []ocmetricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 2},
|
||||
{Count: 5},
|
||||
},
|
||||
}),
|
||||
ocmetricdata.NewDistributionPoint(endTime2, &ocmetricdata.Distribution{
|
||||
Count: 10,
|
||||
Sum: 110.0,
|
||||
BucketOptions: &ocmetricdata.BucketOptions{
|
||||
Bounds: []float64{1.0, 2.0, 3.0},
|
||||
},
|
||||
Buckets: []ocmetricdata.Bucket{
|
||||
{Count: 1},
|
||||
{Count: 4},
|
||||
{Count: 5},
|
||||
},
|
||||
}),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/gauge-a",
|
||||
Description: "an int testing gauge",
|
||||
Unit: ocmetricdata.UnitBytes,
|
||||
Type: ocmetricdata.TypeGaugeInt64,
|
||||
LabelKeys: []ocmetricdata.LabelKey{
|
||||
{Key: "c"},
|
||||
{Key: "d"},
|
||||
},
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
LabelValues: []ocmetricdata.LabelValue{
|
||||
{
|
||||
Value: "foo",
|
||||
Present: true,
|
||||
}, {
|
||||
Value: "bar",
|
||||
Present: true,
|
||||
},
|
||||
},
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewInt64Point(endTime1, 123),
|
||||
ocmetricdata.NewInt64Point(endTime2, 1236),
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/gauge-b",
|
||||
Description: "a float testing gauge",
|
||||
Unit: ocmetricdata.UnitBytes,
|
||||
Type: ocmetricdata.TypeGaugeFloat64,
|
||||
LabelKeys: []ocmetricdata.LabelKey{
|
||||
{Key: "cf"},
|
||||
{Key: "df"},
|
||||
},
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
LabelValues: []ocmetricdata.LabelValue{
|
||||
{
|
||||
Value: "foof",
|
||||
Present: true,
|
||||
}, {
|
||||
Value: "barf",
|
||||
Present: true,
|
||||
},
|
||||
},
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewFloat64Point(endTime1, 123.4),
|
||||
ocmetricdata.NewFloat64Point(endTime2, 1236.7),
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/sum-a",
|
||||
Description: "an int testing sum",
|
||||
Unit: ocmetricdata.UnitMilliseconds,
|
||||
Type: ocmetricdata.TypeCumulativeInt64,
|
||||
LabelKeys: []ocmetricdata.LabelKey{
|
||||
{Key: "e"},
|
||||
{Key: "f"},
|
||||
},
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
LabelValues: []ocmetricdata.LabelValue{
|
||||
{
|
||||
Value: "zig",
|
||||
Present: true,
|
||||
}, {
|
||||
Value: "zag",
|
||||
Present: true,
|
||||
},
|
||||
},
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewInt64Point(endTime1, 13),
|
||||
ocmetricdata.NewInt64Point(endTime2, 14),
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/sum-b",
|
||||
Description: "a float testing sum",
|
||||
Unit: ocmetricdata.UnitMilliseconds,
|
||||
Type: ocmetricdata.TypeCumulativeFloat64,
|
||||
LabelKeys: []ocmetricdata.LabelKey{
|
||||
{Key: "e"},
|
||||
{Key: "f"},
|
||||
},
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
LabelValues: []ocmetricdata.LabelValue{
|
||||
{
|
||||
Value: "zig",
|
||||
Present: true,
|
||||
}, {
|
||||
Value: "zag",
|
||||
Present: true,
|
||||
},
|
||||
},
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewFloat64Point(endTime1, 12.3),
|
||||
ocmetricdata.NewFloat64Point(endTime2, 123.4),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []metricdata.Metrics{
|
||||
{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: metricdata.Histogram{
|
||||
DataPoints: []metricdata.HistogramDataPoint{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("a"),
|
||||
Value: attribute.StringValue("hello"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("b"),
|
||||
Value: attribute.StringValue("world"),
|
||||
}),
|
||||
StartTime: startTime,
|
||||
Time: endTime1,
|
||||
Count: 8,
|
||||
Sum: 100.0,
|
||||
Bounds: []float64{1.0, 2.0, 3.0},
|
||||
BucketCounts: []uint64{1, 2, 5},
|
||||
}, {
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("a"),
|
||||
Value: attribute.StringValue("hello"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("b"),
|
||||
Value: attribute.StringValue("world"),
|
||||
}),
|
||||
StartTime: startTime,
|
||||
Time: endTime2,
|
||||
Count: 10,
|
||||
Sum: 110.0,
|
||||
Bounds: []float64{1.0, 2.0, 3.0},
|
||||
BucketCounts: []uint64{1, 4, 5},
|
||||
},
|
||||
},
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
},
|
||||
}, {
|
||||
Name: "foo.com/gauge-a",
|
||||
Description: "an int testing gauge",
|
||||
Unit: unit.Bytes,
|
||||
Data: metricdata.Gauge[int64]{
|
||||
DataPoints: []metricdata.DataPoint[int64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("c"),
|
||||
Value: attribute.StringValue("foo"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("d"),
|
||||
Value: attribute.StringValue("bar"),
|
||||
}),
|
||||
Time: endTime1,
|
||||
Value: 123,
|
||||
}, {
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("c"),
|
||||
Value: attribute.StringValue("foo"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("d"),
|
||||
Value: attribute.StringValue("bar"),
|
||||
}),
|
||||
Time: endTime2,
|
||||
Value: 1236,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "foo.com/gauge-b",
|
||||
Description: "a float testing gauge",
|
||||
Unit: unit.Bytes,
|
||||
Data: metricdata.Gauge[float64]{
|
||||
DataPoints: []metricdata.DataPoint[float64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("cf"),
|
||||
Value: attribute.StringValue("foof"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("df"),
|
||||
Value: attribute.StringValue("barf"),
|
||||
}),
|
||||
Time: endTime1,
|
||||
Value: 123.4,
|
||||
}, {
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("cf"),
|
||||
Value: attribute.StringValue("foof"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("df"),
|
||||
Value: attribute.StringValue("barf"),
|
||||
}),
|
||||
Time: endTime2,
|
||||
Value: 1236.7,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "foo.com/sum-a",
|
||||
Description: "an int testing sum",
|
||||
Unit: unit.Milliseconds,
|
||||
Data: metricdata.Sum[int64]{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
DataPoints: []metricdata.DataPoint[int64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("e"),
|
||||
Value: attribute.StringValue("zig"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("f"),
|
||||
Value: attribute.StringValue("zag"),
|
||||
}),
|
||||
Time: endTime1,
|
||||
Value: 13,
|
||||
}, {
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("e"),
|
||||
Value: attribute.StringValue("zig"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("f"),
|
||||
Value: attribute.StringValue("zag"),
|
||||
}),
|
||||
Time: endTime2,
|
||||
Value: 14,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "foo.com/sum-b",
|
||||
Description: "a float testing sum",
|
||||
Unit: unit.Milliseconds,
|
||||
Data: metricdata.Sum[float64]{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
DataPoints: []metricdata.DataPoint[float64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("e"),
|
||||
Value: attribute.StringValue("zig"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("f"),
|
||||
Value: attribute.StringValue("zag"),
|
||||
}),
|
||||
Time: endTime1,
|
||||
Value: 12.3,
|
||||
}, {
|
||||
Attributes: attribute.NewSet(attribute.KeyValue{
|
||||
Key: attribute.Key("e"),
|
||||
Value: attribute.StringValue("zig"),
|
||||
}, attribute.KeyValue{
|
||||
Key: attribute.Key("f"),
|
||||
Value: attribute.StringValue("zag"),
|
||||
}),
|
||||
Time: endTime2,
|
||||
Value: 123.4,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
desc: "histogram without data points",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeDistribution,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []metricdata.Metrics{
|
||||
{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: metricdata.Histogram{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
DataPoints: []metricdata.HistogramDataPoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
desc: "sum without data points",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/sum-a",
|
||||
Description: "a testing sum",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeFloat64,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []metricdata.Metrics{
|
||||
{
|
||||
Name: "foo.com/sum-a",
|
||||
Description: "a testing sum",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: metricdata.Sum[float64]{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
DataPoints: []metricdata.DataPoint[float64]{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
desc: "gauge without data points",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/gauge-a",
|
||||
Description: "a testing gauge",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeGaugeInt64,
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: []metricdata.Metrics{
|
||||
{
|
||||
Name: "foo.com/gauge-a",
|
||||
Description: "a testing gauge",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: metricdata.Gauge[int64]{
|
||||
DataPoints: []metricdata.DataPoint[int64]{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
desc: "histogram with negative count",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeDistribution,
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{
|
||||
Count: -8,
|
||||
}),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
}, {
|
||||
desc: "histogram with negative bucket count",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/histogram-a",
|
||||
Description: "a testing histogram",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeDistribution,
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{
|
||||
Buckets: []ocmetricdata.Bucket{
|
||||
{Count: -1},
|
||||
{Count: 2},
|
||||
{Count: 5},
|
||||
},
|
||||
}),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
}, {
|
||||
desc: "histogram with non-histogram datapoint type",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/bad-point",
|
||||
Description: "a bad type",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeDistribution,
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewFloat64Point(endTime1, 1.0),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
}, {
|
||||
desc: "sum with non-sum datapoint type",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/bad-point",
|
||||
Description: "a bad type",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeCumulativeFloat64,
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
}, {
|
||||
desc: "gauge with non-gauge datapoint type",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/bad-point",
|
||||
Description: "a bad type",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeGaugeFloat64,
|
||||
},
|
||||
TimeSeries: []*ocmetricdata.TimeSeries{
|
||||
{
|
||||
Points: []ocmetricdata.Point{
|
||||
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}),
|
||||
},
|
||||
StartTime: startTime,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
}, {
|
||||
desc: "unsupported Gauge Distribution type",
|
||||
input: []*ocmetricdata.Metric{
|
||||
{
|
||||
Descriptor: ocmetricdata.Descriptor{
|
||||
Name: "foo.com/bad-point",
|
||||
Description: "a bad type",
|
||||
Unit: ocmetricdata.UnitDimensionless,
|
||||
Type: ocmetricdata.TypeGaugeDistribution,
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedErr: errConversion,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output, err := ConvertMetrics(tc.input)
|
||||
if !errors.Is(err, tc.expectedErr) {
|
||||
t.Errorf("convertAggregation(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr)
|
||||
}
|
||||
metricdatatest.AssertEqual[metricdata.ScopeMetrics](t,
|
||||
metricdata.ScopeMetrics{Metrics: tc.expected},
|
||||
metricdata.ScopeMetrics{Metrics: output})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertUnits(t *testing.T) {
|
||||
var noUnit unit.Unit
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input ocmetricdata.Unit
|
||||
expected unit.Unit
|
||||
}{{
|
||||
desc: "unspecified unit",
|
||||
expected: noUnit,
|
||||
}, {
|
||||
desc: "dimensionless",
|
||||
input: ocmetricdata.UnitDimensionless,
|
||||
expected: unit.Dimensionless,
|
||||
}, {
|
||||
desc: "milliseconds",
|
||||
input: ocmetricdata.UnitMilliseconds,
|
||||
expected: unit.Milliseconds,
|
||||
}, {
|
||||
desc: "bytes",
|
||||
input: ocmetricdata.UnitBytes,
|
||||
expected: unit.Bytes,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output := convertUnit(tc.input)
|
||||
if output != tc.expected {
|
||||
t.Errorf("convertUnit(%v) = %q, want %q", tc.input, output, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConvertAttributes(t *testing.T) {
|
||||
setWithMultipleKeys := attribute.NewSet(
|
||||
attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")},
|
||||
attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")},
|
||||
)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
inputKeys []ocmetricdata.LabelKey
|
||||
inputValues []ocmetricdata.LabelValue
|
||||
expected *attribute.Set
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
desc: "no attributes",
|
||||
expected: attribute.EmptySet(),
|
||||
},
|
||||
{
|
||||
desc: "different numbers of keys and values",
|
||||
inputKeys: []ocmetricdata.LabelKey{{Key: "foo"}},
|
||||
expected: attribute.EmptySet(),
|
||||
expectedErr: errMismatchedAttributeKeyValues,
|
||||
},
|
||||
{
|
||||
desc: "multiple keys and values",
|
||||
inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}},
|
||||
inputValues: []ocmetricdata.LabelValue{
|
||||
{Value: "1", Present: true},
|
||||
{Value: "2", Present: true},
|
||||
},
|
||||
expected: &setWithMultipleKeys,
|
||||
},
|
||||
{
|
||||
desc: "multiple keys and values with some not present",
|
||||
inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}},
|
||||
inputValues: []ocmetricdata.LabelValue{
|
||||
{Value: "1", Present: true},
|
||||
{Value: "2", Present: true},
|
||||
{Present: false},
|
||||
},
|
||||
expected: &setWithMultipleKeys,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
output, err := convertAttrs(tc.inputKeys, tc.inputValues)
|
||||
if !errors.Is(err, tc.expectedErr) {
|
||||
t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr)
|
||||
}
|
||||
if !output.Equals(tc.expected) {
|
||||
t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -14,8 +14,6 @@ require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
|
||||
)
|
||||
|
||||
@@ -23,10 +21,6 @@ replace go.opentelemetry.io/otel => ../../..
|
||||
|
||||
replace go.opentelemetry.io/otel/bridge/opencensus => ../
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../../trace
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
module go.opentelemetry.io/otel/example/opencensus
|
||||
|
||||
go 1.17
|
||||
|
||||
replace (
|
||||
go.opentelemetry.io/otel => ../..
|
||||
go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus
|
||||
go.opentelemetry.io/otel/sdk => ../../sdk
|
||||
)
|
||||
|
||||
require (
|
||||
go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/bridge/opencensus v0.31.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.31.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.10.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../trace
|
||||
|
||||
replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../../exporters/stdout/stdoutmetric
|
||||
|
||||
replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace
|
||||
@@ -1,61 +0,0 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f h1:IUmbcoP9XyEXW+R9AbrZgDvaYVfTbISN92Y5RIV+Mx4=
|
||||
go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -1,154 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/metric"
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
"go.opencensus.io/metric/metricexport"
|
||||
"go.opencensus.io/metric/metricproducer"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
octrace "go.opencensus.io/trace"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/bridge/opencensus"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
)
|
||||
|
||||
var (
|
||||
// instrumenttype differentiates between our gauge and view metrics.
|
||||
keyType = tag.MustNewKey("instrumenttype")
|
||||
// Counts the number of lines read in from standard input.
|
||||
countMeasure = stats.Int64("test_count", "A count of something", stats.UnitDimensionless)
|
||||
countView = &view.View{
|
||||
Name: "test_count",
|
||||
Measure: countMeasure,
|
||||
Description: "A count of something",
|
||||
Aggregation: view.Count(),
|
||||
TagKeys: []tag.Key{keyType},
|
||||
}
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Println("Using OpenTelemetry stdout exporters.")
|
||||
traceExporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("error creating trace exporter: %w", err))
|
||||
}
|
||||
metricsExporter, err := stdoutmetric.New(stdoutmetric.WithPrettyPrint())
|
||||
if err != nil {
|
||||
log.Fatal(fmt.Errorf("error creating metric exporter: %w", err))
|
||||
}
|
||||
tracing(traceExporter)
|
||||
if err := monitoring(metricsExporter); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// tracing demonstrates overriding the OpenCensus DefaultTracer to send spans
|
||||
// to the OpenTelemetry exporter by calling OpenCensus APIs.
|
||||
func tracing(otExporter sdktrace.SpanExporter) {
|
||||
ctx := context.Background()
|
||||
|
||||
log.Println("Configuring OpenCensus. Not Registering any OpenCensus exporters.")
|
||||
octrace.ApplyConfig(octrace.Config{DefaultSampler: octrace.AlwaysSample()})
|
||||
|
||||
tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(otExporter))
|
||||
otel.SetTracerProvider(tp)
|
||||
|
||||
log.Println("Installing the OpenCensus bridge to make OpenCensus libraries write spans using OpenTelemetry.")
|
||||
tracer := tp.Tracer("simple")
|
||||
octrace.DefaultTracer = opencensus.NewTracer(tracer)
|
||||
tp.ForceFlush(ctx)
|
||||
|
||||
log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have no parent, since it is the first span.")
|
||||
ctx, outerOCSpan := octrace.StartSpan(ctx, "OpenCensusOuterSpan")
|
||||
outerOCSpan.End()
|
||||
tp.ForceFlush(ctx)
|
||||
|
||||
log.Println("Creating OpenTelemetry span\n-- It should have the OpenCensus span as a parent, since the OpenCensus span was written with using OpenTelemetry APIs.")
|
||||
ctx, otspan := tracer.Start(ctx, "OpenTelemetrySpan")
|
||||
otspan.End()
|
||||
tp.ForceFlush(ctx)
|
||||
|
||||
log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have the OpenTelemetry span as a parent, since it was written using OpenTelemetry APIs")
|
||||
_, innerOCSpan := octrace.StartSpan(ctx, "OpenCensusInnerSpan")
|
||||
innerOCSpan.End()
|
||||
tp.ForceFlush(ctx)
|
||||
}
|
||||
|
||||
// monitoring demonstrates creating an IntervalReader using the OpenTelemetry
|
||||
// exporter to send metrics to the exporter by using either an OpenCensus
|
||||
// registry or an OpenCensus view.
|
||||
func monitoring(otExporter export.Exporter) error {
|
||||
log.Println("Using the OpenTelemetry stdoutmetric exporter to export OpenCensus metrics. This allows routing telemetry from both OpenTelemetry and OpenCensus to a single exporter.")
|
||||
ocExporter := opencensus.NewMetricExporter(otExporter)
|
||||
intervalReader, err := metricexport.NewIntervalReader(&metricexport.Reader{}, ocExporter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create interval reader: %w", err)
|
||||
}
|
||||
intervalReader.ReportingInterval = 10 * time.Second
|
||||
log.Println("Emitting metrics using OpenCensus APIs. These should be printed out using the OpenTelemetry stdoutmetric exporter.")
|
||||
err = intervalReader.Start()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start interval reader: %w", err)
|
||||
}
|
||||
defer intervalReader.Stop()
|
||||
|
||||
log.Println("Registering a gauge metric using an OpenCensus registry.")
|
||||
r := metric.NewRegistry()
|
||||
metricproducer.GlobalManager().AddProducer(r)
|
||||
gauge, err := r.AddInt64Gauge(
|
||||
"test_gauge",
|
||||
metric.WithDescription("A gauge for testing"),
|
||||
metric.WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{
|
||||
{Key: keyType.Name()}: metricdata.NewLabelValue("gauge"),
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add gauge: %w", err)
|
||||
}
|
||||
entry, err := gauge.GetEntry()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get gauge entry: %w", err)
|
||||
}
|
||||
|
||||
log.Println("Registering a cumulative metric using an OpenCensus view.")
|
||||
if err := view.Register(countView); err != nil {
|
||||
return fmt.Errorf("failed to register views: %w", err)
|
||||
}
|
||||
ctx, err := tag.New(context.Background(), tag.Insert(keyType, "view"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set tag: %w", err)
|
||||
}
|
||||
for i := int64(1); true; i++ {
|
||||
// update stats for our gauge
|
||||
entry.Set(i)
|
||||
// update stats for our view
|
||||
stats.Record(ctx, countMeasure.M(1))
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -12,4 +12,5 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig
|
||||
// Package main provides a code sample of the Prometheus exporter.
|
||||
package main
|
||||
@@ -1,14 +1,9 @@
|
||||
module go.opentelemetry.io/otel/example/prometheus
|
||||
|
||||
go 1.17
|
||||
|
||||
replace (
|
||||
go.opentelemetry.io/otel => ../..
|
||||
go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus
|
||||
go.opentelemetry.io/otel/sdk => ../../sdk
|
||||
)
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.31.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
@@ -22,18 +17,23 @@ require (
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/prometheus/client_golang v1.12.2 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
google.golang.org/protobuf v1.26.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
replace go.opentelemetry.io/otel => ../..
|
||||
|
||||
replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../trace
|
||||
|
||||
@@ -38,7 +38,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -65,9 +64,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
@@ -165,8 +166,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -175,14 +177,16 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -266,12 +270,15 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -316,15 +323,20 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -446,8 +458,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -12,6 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
@@ -21,118 +24,75 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/prometheus"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
otelprom "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
var (
|
||||
lemonsKey = attribute.Key("ex.com/lemons")
|
||||
)
|
||||
|
||||
func initMeter() error {
|
||||
config := prometheus.Config{
|
||||
DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},
|
||||
}
|
||||
c := controller.New(
|
||||
processor.NewFactory(
|
||||
selector.NewWithHistogramDistribution(
|
||||
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
||||
),
|
||||
aggregation.CumulativeTemporalitySelector(),
|
||||
processor.WithMemory(true),
|
||||
),
|
||||
)
|
||||
exporter, err := prometheus.New(config, c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize prometheus exporter: %w", err)
|
||||
}
|
||||
|
||||
global.SetMeterProvider(exporter.MeterProvider())
|
||||
|
||||
http.HandleFunc("/", exporter.ServeHTTP)
|
||||
go func() {
|
||||
_ = http.ListenAndServe(":2222", nil)
|
||||
}()
|
||||
|
||||
fmt.Println("Prometheus server running on :2222")
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := initMeter(); err != nil {
|
||||
ctx := context.Background()
|
||||
|
||||
// The exporter embeds a default OpenTelemetry Reader and
|
||||
// implements prometheus.Collector, allowing it to be used as
|
||||
// both a Reader and Collector.
|
||||
exporter := otelprom.New()
|
||||
provider := metric.NewMeterProvider(metric.WithReader(exporter))
|
||||
meter := provider.Meter("github.com/open-telemetry/opentelemetry-go/example/prometheus")
|
||||
|
||||
// Start the prometheus HTTP server and pass the exporter Collector to it
|
||||
go serveMetrics(exporter.Collector)
|
||||
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.Key("A").String("B"),
|
||||
attribute.Key("C").String("D"),
|
||||
}
|
||||
|
||||
// This is the equivalent of prometheus.NewCounterVec
|
||||
counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a simple counter"))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
counter.Add(ctx, 5, attrs...)
|
||||
|
||||
meter := global.Meter("ex.com/basic")
|
||||
|
||||
observerLock := new(sync.RWMutex)
|
||||
observerValueToReport := new(float64)
|
||||
observerAttrsToReport := new([]attribute.KeyValue)
|
||||
|
||||
gaugeObserver, err := meter.AsyncFloat64().Gauge("ex.com.one")
|
||||
gauge, err := meter.SyncFloat64().UpDownCounter("bar", instrument.WithDescription("a fun little gauge"))
|
||||
if err != nil {
|
||||
log.Panicf("failed to initialize instrument: %v", err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = meter.RegisterCallback([]instrument.Asynchronous{gaugeObserver}, func(ctx context.Context) {
|
||||
(*observerLock).RLock()
|
||||
value := *observerValueToReport
|
||||
attrs := *observerAttrsToReport
|
||||
(*observerLock).RUnlock()
|
||||
gaugeObserver.Observe(ctx, value, attrs...)
|
||||
})
|
||||
gauge.Add(ctx, 100, attrs...)
|
||||
gauge.Add(ctx, -25, attrs...)
|
||||
|
||||
hist, err := meter.SyncFloat64().Histogram("ex.com.two")
|
||||
// This is the equivalent of prometheus.NewHistogramVec
|
||||
histogram, err := meter.SyncFloat64().Histogram("baz", instrument.WithDescription("a very nice histogram"))
|
||||
if err != nil {
|
||||
log.Panicf("failed to initialize instrument: %v", err)
|
||||
}
|
||||
counter, err := meter.SyncFloat64().Counter("ex.com.three")
|
||||
if err != nil {
|
||||
log.Panicf("failed to initialize instrument: %v", err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
histogram.Record(ctx, 23, attrs...)
|
||||
histogram.Record(ctx, 7, attrs...)
|
||||
histogram.Record(ctx, 101, attrs...)
|
||||
histogram.Record(ctx, 105, attrs...)
|
||||
|
||||
commonAttrs := []attribute.KeyValue{lemonsKey.Int(10), attribute.String("A", "1"), attribute.String("B", "2"), attribute.String("C", "3")}
|
||||
notSoCommonAttrs := []attribute.KeyValue{lemonsKey.Int(13)}
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
|
||||
defer stop()
|
||||
|
||||
(*observerLock).Lock()
|
||||
*observerValueToReport = 1.0
|
||||
*observerAttrsToReport = commonAttrs
|
||||
(*observerLock).Unlock()
|
||||
|
||||
hist.Record(ctx, 2.0, commonAttrs...)
|
||||
counter.Add(ctx, 12.0, commonAttrs...)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
(*observerLock).Lock()
|
||||
*observerValueToReport = 1.0
|
||||
*observerAttrsToReport = notSoCommonAttrs
|
||||
(*observerLock).Unlock()
|
||||
hist.Record(ctx, 2.0, notSoCommonAttrs...)
|
||||
counter.Add(ctx, 22.0, notSoCommonAttrs...)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
(*observerLock).Lock()
|
||||
*observerValueToReport = 13.0
|
||||
*observerAttrsToReport = commonAttrs
|
||||
(*observerLock).Unlock()
|
||||
hist.Record(ctx, 12.0, commonAttrs...)
|
||||
counter.Add(ctx, 13.0, commonAttrs...)
|
||||
|
||||
fmt.Println("Example finished updating, please visit :2222")
|
||||
|
||||
ctx, _ = signal.NotifyContext(ctx, os.Interrupt)
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func serveMetrics(collector prometheus.Collector) {
|
||||
registry := prometheus.NewRegistry()
|
||||
err := registry.Register(collector)
|
||||
if err != nil {
|
||||
fmt.Printf("error registering collector: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("serving metrics at localhost:2222/metrics")
|
||||
http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
|
||||
err = http.ListenAndServe(":2222", nil)
|
||||
if err != nil {
|
||||
fmt.Printf("error serving http: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
52
exporters/otlp/otlpmetric/client.go
Normal file
52
exporters/otlp/otlpmetric/client.go
Normal file
@@ -0,0 +1,52 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Client handles the transmission of OTLP data to an OTLP receiving endpoint.
|
||||
type Client interface {
|
||||
// UploadMetrics transmits metric data to an OTLP receiver.
|
||||
//
|
||||
// All retry logic must be handled by UploadMetrics alone, the Exporter
|
||||
// does not implement any retry logic. All returned errors are considered
|
||||
// unrecoverable.
|
||||
UploadMetrics(context.Context, *mpb.ResourceMetrics) error
|
||||
|
||||
// ForceFlush flushes any metric data held by an Client.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
ForceFlush(context.Context) error
|
||||
|
||||
// Shutdown flushes all metric data held by a Client and closes any
|
||||
// connections it holds open.
|
||||
//
|
||||
// The deadline or cancellation of the passed context must be honored. An
|
||||
// appropriate error should be returned in these situations.
|
||||
//
|
||||
// Shutdown will only be called once by the Exporter. Once a return value
|
||||
// is received by the Exporter from Shutdown the Client will not be used
|
||||
// anymore. Therefore all computational resources need to be released
|
||||
// after this is called so the Client can be garbage collected.
|
||||
Shutdown(context.Context) error
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Client manages connections to the collector, handles the
|
||||
// transformation of data into wire format, and the transmission of that
|
||||
// data to the collector.
|
||||
type Client interface {
|
||||
// Start should establish connection(s) to endpoint(s). It is
|
||||
// called just once by the exporter, so the implementation
|
||||
// does not need to worry about idempotence and locking.
|
||||
Start(ctx context.Context) error
|
||||
// Stop should close the connections. The function is called
|
||||
// only once by the exporter, so the implementation does not
|
||||
// need to worry about idempotence, but it may be called
|
||||
// concurrently with UploadMetrics, so proper
|
||||
// locking is required. The function serves as a
|
||||
// synchronization point - after the function returns, the
|
||||
// process of closing connections is assumed to be finished.
|
||||
Stop(ctx context.Context) error
|
||||
// UploadMetrics should transform the passed metrics to the
|
||||
// wire format and send it to the collector. May be called
|
||||
// concurrently.
|
||||
UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error
|
||||
}
|
||||
20
exporters/otlp/otlpmetric/doc.go
Normal file
20
exporters/otlp/otlpmetric/doc.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package otlpmetric provides an OpenTelemetry metric Exporter that can be
|
||||
// used with PeriodicReader. It transforms metricdata into OTLP and transmits
|
||||
// the transformed data to OTLP receivers. The Exporter is configurable to use
|
||||
// different Clients, each using a distinct transport protocol to communicate
|
||||
// to an OTLP receiving endpoint.
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
@@ -12,121 +12,96 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadyStarted = errors.New("already started")
|
||||
)
|
||||
// exporter exports metrics data as OTLP.
|
||||
type exporter struct {
|
||||
// Ensure synchronous access to the client across all functionality.
|
||||
clientMu sync.Mutex
|
||||
client Client
|
||||
|
||||
// Exporter exports metrics data in the OTLP wire format.
|
||||
type Exporter struct {
|
||||
client Client
|
||||
temporalitySelector aggregation.TemporalitySelector
|
||||
|
||||
mu sync.RWMutex
|
||||
started bool
|
||||
|
||||
startOnce sync.Once
|
||||
stopOnce sync.Once
|
||||
shutdownOnce sync.Once
|
||||
}
|
||||
|
||||
// Export exports a batch of metrics.
|
||||
func (e *Exporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error {
|
||||
rm, err := metrictransform.InstrumentationLibraryReader(ctx, e, res, ilr, 1)
|
||||
if err != nil {
|
||||
// Export transforms and transmits metric data to an OTLP receiver.
|
||||
func (e *exporter) Export(ctx context.Context, rm metricdata.ResourceMetrics) error {
|
||||
otlpRm, err := transform.ResourceMetrics(rm)
|
||||
// Best effort upload of transformable metrics.
|
||||
e.clientMu.Lock()
|
||||
upErr := e.client.UploadMetrics(ctx, otlpRm)
|
||||
e.clientMu.Unlock()
|
||||
if upErr != nil {
|
||||
if err == nil {
|
||||
return upErr
|
||||
}
|
||||
// Merge the two errors.
|
||||
return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ForceFlush flushes any metric data held by an exporter.
|
||||
func (e *exporter) ForceFlush(ctx context.Context) error {
|
||||
// The Exporter does not hold data, forward the command to the client.
|
||||
e.clientMu.Lock()
|
||||
defer e.clientMu.Unlock()
|
||||
return e.client.ForceFlush(ctx)
|
||||
}
|
||||
|
||||
// errShutdown is returned by exporter methods once Shutdown has completed.
// errors.New is used instead of fmt.Errorf because the message has no
// format verbs (staticcheck S1039).
var errShutdown = errors.New("exporter is shutdown")
|
||||
|
||||
// Shutdown flushes all metric data held by an exporter and releases any held
|
||||
// computational resources.
|
||||
func (e *exporter) Shutdown(ctx context.Context) error {
|
||||
err := errShutdown
|
||||
e.shutdownOnce.Do(func() {
|
||||
e.clientMu.Lock()
|
||||
client := e.client
|
||||
e.client = shutdownClient{}
|
||||
e.clientMu.Unlock()
|
||||
err = client.Shutdown(ctx)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// New return an Exporter that uses client to transmits the OTLP data it
|
||||
// produces. The client is assumed to be fully started and able to communicate
|
||||
// with its OTLP receiving endpoint.
|
||||
func New(client Client) metric.Exporter {
|
||||
return &exporter{client: client}
|
||||
}
|
||||
|
||||
type shutdownClient struct{}
|
||||
|
||||
func (c shutdownClient) err(ctx context.Context) error {
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
if rm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: There is never more than one resource emitted by this
|
||||
// call, as per the specification. We can change the
|
||||
// signature of UploadMetrics correspondingly. Here create a
|
||||
// singleton list to reduce the size of the current PR:
|
||||
return e.client.UploadMetrics(ctx, rm)
|
||||
return errShutdown
|
||||
}
|
||||
|
||||
// Start establishes a connection to the receiving endpoint.
|
||||
func (e *Exporter) Start(ctx context.Context) error {
|
||||
var err = errAlreadyStarted
|
||||
e.startOnce.Do(func() {
|
||||
e.mu.Lock()
|
||||
e.started = true
|
||||
e.mu.Unlock()
|
||||
err = e.client.Start(ctx)
|
||||
})
|
||||
|
||||
return err
|
||||
// UploadMetrics rejects the upload; the error (context error or shutdown
// error) is produced by c.err. No data is ever transmitted.
func (c shutdownClient) UploadMetrics(ctx context.Context, _ *mpb.ResourceMetrics) error {
	return c.err(ctx)
}
|
||||
|
||||
// Shutdown flushes all exports and closes all connections to the receiving endpoint.
|
||||
func (e *Exporter) Shutdown(ctx context.Context) error {
|
||||
e.mu.RLock()
|
||||
started := e.started
|
||||
e.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
e.stopOnce.Do(func() {
|
||||
err = e.client.Stop(ctx)
|
||||
e.mu.Lock()
|
||||
e.started = false
|
||||
e.mu.Unlock()
|
||||
})
|
||||
|
||||
return err
|
||||
// ForceFlush reports the error produced by c.err; a shut-down client holds
// nothing to flush.
func (c shutdownClient) ForceFlush(ctx context.Context) error {
	return c.err(ctx)
}
|
||||
|
||||
// TemporalityFor returns the accepted temporality for a metric measurement.
// It delegates to the TemporalitySelector the Exporter was configured with.
func (e *Exporter) TemporalityFor(descriptor *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
	return e.temporalitySelector.TemporalityFor(descriptor, kind)
}
|
||||
|
||||
var _ export.Exporter = (*Exporter)(nil)
|
||||
|
||||
// New constructs a new Exporter and starts it.
|
||||
func New(ctx context.Context, client Client, opts ...Option) (*Exporter, error) {
|
||||
exp := NewUnstarted(client, opts...)
|
||||
if err := exp.Start(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exp, nil
|
||||
}
|
||||
|
||||
// NewUnstarted constructs a new Exporter and does not start it.
|
||||
func NewUnstarted(client Client, opts ...Option) *Exporter {
|
||||
cfg := config{
|
||||
// Note: the default TemporalitySelector is specified
|
||||
// as Cumulative:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/731
|
||||
temporalitySelector: aggregation.CumulativeTemporalitySelector(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
|
||||
e := &Exporter{
|
||||
client: client,
|
||||
temporalitySelector: cfg.temporalitySelector,
|
||||
}
|
||||
|
||||
return e
|
||||
// Shutdown reports the error produced by c.err; the client is already shut
// down, so there is no further work to do.
func (c shutdownClient) Shutdown(ctx context.Context) error {
	return c.err(ctx)
}
|
||||
|
||||
@@ -12,837 +12,82 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric_test
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Timestamps used in this test:
|
||||
|
||||
intervalStart = time.Now()
|
||||
intervalEnd = intervalStart.Add(time.Hour)
|
||||
)
|
||||
|
||||
type stubClient struct {
|
||||
rm []*metricpb.ResourceMetrics
|
||||
type client struct {
|
||||
// n is incremented by all Client methods. If these methods are called
|
||||
// concurrently this should fail tests run with the race detector.
|
||||
n int
|
||||
}
|
||||
|
||||
func (m *stubClient) Start(ctx context.Context) error {
|
||||
func (c *client) UploadMetrics(context.Context, *mpb.ResourceMetrics) error {
|
||||
c.n++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *stubClient) Stop(ctx context.Context) error {
|
||||
func (c *client) ForceFlush(context.Context) error {
|
||||
c.n++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *stubClient) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||
m.rm = append(m.rm, protoMetrics)
|
||||
// Shutdown records the call by incrementing n. The increment is deliberately
// unguarded so concurrent (incorrect) use of the client is caught by the
// race detector.
func (c *client) Shutdown(context.Context) error {
	c.n++
	return nil
}
|
||||
|
||||
var _ otlpmetric.Client = (*stubClient)(nil)
|
||||
func TestExporterClientConcurrency(t *testing.T) {
|
||||
const goroutines = 5
|
||||
|
||||
func (m *stubClient) Reset() {
|
||||
m.rm = nil
|
||||
}
|
||||
exp := New(&client{})
|
||||
rm := metricdata.ResourceMetrics{}
|
||||
ctx := context.Background()
|
||||
|
||||
func newExporter(t *testing.T, opts ...otlpmetric.Option) (*otlpmetric.Exporter, *stubClient) {
|
||||
client := &stubClient{}
|
||||
exp, _ := otlpmetric.New(context.Background(), client, opts...)
|
||||
return exp, client
|
||||
}
|
||||
done := make(chan struct{})
|
||||
first := make(chan struct{}, goroutines)
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < goroutines; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
assert.NoError(t, exp.Export(ctx, rm))
|
||||
assert.NoError(t, exp.ForceFlush(ctx))
|
||||
// Ensure some work is done before shutting down.
|
||||
first <- struct{}{}
|
||||
|
||||
// startTime returns intervalStart as Unix nanoseconds, the representation
// used by the OTLP protobuf StartTimeUnixNano fields.
func startTime() uint64 {
	return uint64(intervalStart.UnixNano())
}
|
||||
for {
|
||||
_ = exp.Export(ctx, rm)
|
||||
_ = exp.ForceFlush(ctx)
|
||||
|
||||
// pointTime returns intervalEnd as Unix nanoseconds, the representation
// used by the OTLP protobuf TimeUnixNano fields.
func pointTime() uint64 {
	return uint64(intervalEnd.UnixNano())
}
|
||||
|
||||
// testRecord describes one synthetic metric record used to drive the export
// tests: the instrument it notionally came from, its attributes, and the
// meter (instrumentation library) that owns it.
type testRecord struct {
	// name is the instrument name.
	name string
	// iKind is the instrument kind (counter, histogram, ...).
	iKind sdkapi.InstrumentKind
	// nKind is the number kind (int64 or float64).
	nKind number.Kind
	// attrs are the attributes attached to the record's data point.
	attrs []attribute.KeyValue

	// meterName and meterOpts identify the instrumentation library the
	// record is grouped under.
	meterName string
	meterOpts []metric.MeterOption
}
|
||||
|
||||
// record assembles a testRecord from its individual components; it exists
// only to keep the test tables below compact.
func record(
	name string,
	iKind sdkapi.InstrumentKind,
	nKind number.Kind,
	attrs []attribute.KeyValue,
	meterName string,
	meterOpts ...metric.MeterOption) testRecord {
	return testRecord{
		name:      name,
		iKind:     iKind,
		nKind:     nKind,
		attrs:     attrs,
		meterName: meterName,
		meterOpts: meterOpts,
	}
}
|
||||
|
||||
var (
|
||||
baseKeyValues = []attribute.KeyValue{attribute.String("host", "test.com")}
|
||||
cpuKey = attribute.Key("CPU")
|
||||
|
||||
testHistogramBoundaries = []float64{2.0, 4.0, 8.0}
|
||||
|
||||
cpu1Attrs = []*commonpb.KeyValue{
|
||||
{
|
||||
Key: "CPU",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "host",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: "test.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
cpu2Attrs = []*commonpb.KeyValue{
|
||||
{
|
||||
Key: "CPU",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "host",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: "test.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
testerAResource = resource.NewSchemaless(attribute.String("instance", "tester-a"))
|
||||
testerAResourcePb = metrictransform.Resource(testerAResource)
|
||||
)
|
||||
|
||||
const (
|
||||
// Most of this test uses an empty instrumentation library name.
|
||||
testLibName = ""
|
||||
)
|
||||
|
||||
func TestNoGroupingExport(t *testing.T) {
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
resource.Empty(),
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu2Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestHistogramInt64MetricGroupingExport(t *testing.T) {
|
||||
r := record(
|
||||
"int64-histogram",
|
||||
sdkapi.HistogramInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
)
|
||||
sumVal := 11.0
|
||||
expected := []*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-histogram",
|
||||
Data: &metricpb.Metric_Histogram{
|
||||
Histogram: &metricpb.Histogram{
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.HistogramDataPoint{
|
||||
{
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
Count: 2,
|
||||
Sum: &sumVal,
|
||||
ExplicitBounds: testHistogramBoundaries,
|
||||
BucketCounts: []uint64{1, 0, 0, 1},
|
||||
},
|
||||
{
|
||||
Attributes: cpu1Attrs,
|
||||
Count: 2,
|
||||
Sum: &sumVal,
|
||||
ExplicitBounds: testHistogramBoundaries,
|
||||
BucketCounts: []uint64{1, 0, 0, 1},
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected)
|
||||
}
|
||||
|
||||
func TestHistogramFloat64MetricGroupingExport(t *testing.T) {
|
||||
r := record(
|
||||
"float64-histogram",
|
||||
sdkapi.HistogramInstrumentKind,
|
||||
number.Float64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
)
|
||||
sumVal := 11.0
|
||||
expected := []*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "float64-histogram",
|
||||
Data: &metricpb.Metric_Histogram{
|
||||
Histogram: &metricpb.Histogram{
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.HistogramDataPoint{
|
||||
{
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
Count: 2,
|
||||
Sum: &sumVal,
|
||||
ExplicitBounds: testHistogramBoundaries,
|
||||
BucketCounts: []uint64{1, 0, 0, 1},
|
||||
},
|
||||
{
|
||||
Attributes: cpu1Attrs,
|
||||
Count: 2,
|
||||
Sum: &sumVal,
|
||||
ExplicitBounds: testHistogramBoundaries,
|
||||
BucketCounts: []uint64{1, 0, 0, 1},
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected)
|
||||
}
|
||||
|
||||
func TestCountInt64MetricGroupingExport(t *testing.T) {
|
||||
r := record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
)
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
resource.Empty(),
|
||||
[]testRecord{r, r},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestCountFloat64MetricGroupingExport(t *testing.T) {
|
||||
r := record(
|
||||
"float64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Float64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
)
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
resource.Empty(),
|
||||
[]testRecord{r, r},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: nil,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "float64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestResourceMetricGroupingExport(t *testing.T) {
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
testerAResource,
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
testLibName,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: testerAResourcePb,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu2Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestResourceInstLibMetricGroupingExport(t *testing.T) {
|
||||
version1 := metric.WithInstrumentationVersion("v1")
|
||||
version2 := metric.WithInstrumentationVersion("v2")
|
||||
specialSchema := metric.WithSchemaURL("schurl")
|
||||
summingLib := "summing-lib"
|
||||
countingLib := "counting-lib"
|
||||
runMetricExportTests(
|
||||
t,
|
||||
nil,
|
||||
testerAResource,
|
||||
[]testRecord{
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
countingLib,
|
||||
version2,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(2)),
|
||||
countingLib,
|
||||
version1,
|
||||
),
|
||||
record(
|
||||
"int64-count",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
summingLib,
|
||||
specialSchema,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: testerAResourcePb,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Scope: &commonpb.InstrumentationScope{
|
||||
Name: "counting-lib",
|
||||
Version: "v1",
|
||||
},
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu2Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Scope: &commonpb.InstrumentationScope{
|
||||
Name: "counting-lib",
|
||||
Version: "v2",
|
||||
},
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Scope: &commonpb.InstrumentationScope{
|
||||
Name: "summing-lib",
|
||||
},
|
||||
SchemaUrl: "schurl",
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "int64-count",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: true,
|
||||
AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestStatelessAggregationTemporality(t *testing.T) {
|
||||
type testcase struct {
|
||||
name string
|
||||
instrumentKind sdkapi.InstrumentKind
|
||||
aggTemporality metricpb.AggregationTemporality
|
||||
monotonic bool
|
||||
}
|
||||
|
||||
for _, k := range []testcase{
|
||||
{"counter", sdkapi.CounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, true},
|
||||
{"updowncounter", sdkapi.UpDownCounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, false},
|
||||
{"counterobserver", sdkapi.CounterObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, true},
|
||||
{"updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, false},
|
||||
} {
|
||||
t.Run(k.name, func(t *testing.T) {
|
||||
runMetricExportTests(
|
||||
t,
|
||||
[]otlpmetric.Option{
|
||||
otlpmetric.WithMetricAggregationTemporalitySelector(
|
||||
aggregation.StatelessTemporalitySelector(),
|
||||
),
|
||||
},
|
||||
testerAResource,
|
||||
[]testRecord{
|
||||
record(
|
||||
"instrument",
|
||||
k.instrumentKind,
|
||||
number.Int64Kind,
|
||||
append(baseKeyValues, cpuKey.Int(1)),
|
||||
testLibName,
|
||||
),
|
||||
},
|
||||
[]*metricpb.ResourceMetrics{
|
||||
{
|
||||
Resource: testerAResourcePb,
|
||||
ScopeMetrics: []*metricpb.ScopeMetrics{
|
||||
{
|
||||
Metrics: []*metricpb.Metric{
|
||||
{
|
||||
Name: "instrument",
|
||||
Data: &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: k.monotonic,
|
||||
AggregationTemporality: k.aggTemporality,
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11},
|
||||
Attributes: cpu1Attrs,
|
||||
StartTimeUnixNano: startTime(),
|
||||
TimeUnixNano: pointTime(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []testRecord, expected []*metricpb.ResourceMetrics) {
|
||||
exp, driver := newExporter(t, opts...)
|
||||
|
||||
libraryRecs := map[instrumentation.Library][]export.Record{}
|
||||
for _, r := range records {
|
||||
lcopy := make([]attribute.KeyValue, len(r.attrs))
|
||||
copy(lcopy, r.attrs)
|
||||
desc := metrictest.NewDescriptor(r.name, r.iKind, r.nKind)
|
||||
labs := attribute.NewSet(lcopy...)
|
||||
|
||||
var agg, ckpt aggregator.Aggregator
|
||||
if r.iKind.Adding() {
|
||||
sums := sum.New(2)
|
||||
agg, ckpt = &sums[0], &sums[1]
|
||||
} else {
|
||||
histos := histogram.New(2, &desc, histogram.WithExplicitBoundaries(testHistogramBoundaries))
|
||||
agg, ckpt = &histos[0], &histos[1]
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
if r.iKind.Synchronous() {
|
||||
// For synchronous instruments, perform two updates: 1 and 10
|
||||
switch r.nKind {
|
||||
case number.Int64Kind:
|
||||
require.NoError(t, agg.Update(ctx, number.NewInt64Number(1), &desc))
|
||||
require.NoError(t, agg.Update(ctx, number.NewInt64Number(10), &desc))
|
||||
case number.Float64Kind:
|
||||
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(1), &desc))
|
||||
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(10), &desc))
|
||||
default:
|
||||
t.Fatalf("invalid number kind: %v", r.nKind)
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For asynchronous instruments, perform a single update: 11
|
||||
switch r.nKind {
|
||||
case number.Int64Kind:
|
||||
require.NoError(t, agg.Update(ctx, number.NewInt64Number(11), &desc))
|
||||
case number.Float64Kind:
|
||||
require.NoError(t, agg.Update(ctx, number.NewFloat64Number(11), &desc))
|
||||
default:
|
||||
t.Fatalf("invalid number kind: %v", r.nKind)
|
||||
}
|
||||
}
|
||||
require.NoError(t, agg.SynchronizedMove(ckpt, &desc))
|
||||
|
||||
meterCfg := metric.NewMeterConfig(r.meterOpts...)
|
||||
lib := instrumentation.Library{
|
||||
Name: r.meterName,
|
||||
Version: meterCfg.InstrumentationVersion(),
|
||||
SchemaURL: meterCfg.SchemaURL(),
|
||||
}
|
||||
libraryRecs[lib] = append(libraryRecs[lib], export.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd))
|
||||
}
|
||||
assert.NoError(t, exp.Export(context.Background(), res, processortest.MultiInstrumentationLibraryReader(libraryRecs)))
|
||||
|
||||
// assert.ElementsMatch does not equate nested slices of different order,
|
||||
// therefore this requires the top level slice to be broken down.
|
||||
// Build a map of Resource/Scope pairs to Metrics, from that validate the
|
||||
// metric elements match for all expected pairs. Finally, make we saw all
|
||||
// expected pairs.
|
||||
keyFor := func(sm *metricpb.ScopeMetrics) string {
|
||||
return fmt.Sprintf("%s/%s/%s", sm.GetScope().GetName(), sm.GetScope().GetVersion(), sm.GetSchemaUrl())
|
||||
}
|
||||
got := map[string][]*metricpb.Metric{}
|
||||
for _, rm := range driver.rm {
|
||||
for _, sm := range rm.ScopeMetrics {
|
||||
k := keyFor(sm)
|
||||
got[k] = append(got[k], sm.GetMetrics()...)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
seen := map[string]struct{}{}
|
||||
for _, rm := range expected {
|
||||
for _, sm := range rm.ScopeMetrics {
|
||||
k := keyFor(sm)
|
||||
seen[k] = struct{}{}
|
||||
g, ok := got[k]
|
||||
if !ok {
|
||||
t.Errorf("missing metrics for:\n\tInstrumentationScope: %q\n", k)
|
||||
continue
|
||||
}
|
||||
if !assert.Len(t, g, len(sm.GetMetrics())) {
|
||||
continue
|
||||
}
|
||||
for i, expected := range sm.GetMetrics() {
|
||||
assert.Equal(t, "", cmp.Diff(expected, g[i], protocmp.Transform()))
|
||||
}
|
||||
}
|
||||
}
|
||||
for k := range got {
|
||||
if _, ok := seen[k]; !ok {
|
||||
t.Errorf("did not expect metrics for:\n\tInstrumentationScope: %s\n", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEmptyMetricExport(t *testing.T) {
|
||||
exp, driver := newExporter(t)
|
||||
|
||||
for _, test := range []struct {
|
||||
records []export.Record
|
||||
want []*metricpb.ResourceMetrics
|
||||
}{
|
||||
{
|
||||
[]export.Record(nil),
|
||||
[]*metricpb.ResourceMetrics(nil),
|
||||
},
|
||||
{
|
||||
[]export.Record{},
|
||||
[]*metricpb.ResourceMetrics(nil),
|
||||
},
|
||||
} {
|
||||
driver.Reset()
|
||||
require.NoError(t, exp.Export(context.Background(), resource.Empty(), processortest.MultiInstrumentationLibraryReader(map[instrumentation.Library][]export.Record{
|
||||
{
|
||||
Name: testLibName,
|
||||
}: test.records,
|
||||
})))
|
||||
assert.Equal(t, test.want, driver.rm)
|
||||
for i := 0; i < goroutines; i++ {
|
||||
<-first
|
||||
}
|
||||
close(first)
|
||||
assert.NoError(t, exp.Shutdown(ctx))
|
||||
assert.ErrorIs(t, exp.Shutdown(ctx), errShutdown)
|
||||
|
||||
close(done)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
@@ -1,18 +1,18 @@
|
||||
module go.opentelemetry.io/otel/exporters/otlp/otlpmetric
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/google/go-cmp v0.5.8
|
||||
github.com/stretchr/testify v1.7.1
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/proto/otlp v0.19.0
|
||||
google.golang.org/grpc v1.46.2
|
||||
google.golang.org/protobuf v1.28.0
|
||||
google.golang.org/grpc v1.42.0
|
||||
google.golang.org/protobuf v1.27.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -31,14 +31,14 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel => ../../..
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../../metric
|
||||
replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../internal/retry
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../../trace
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../internal/retry
|
||||
|
||||
@@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -51,7 +50,6 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -61,7 +59,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
@@ -226,7 +223,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
@@ -268,9 +264,7 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -398,9 +392,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
|
||||
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
|
||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||
google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ=
|
||||
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -413,9 +406,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -1,158 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||
func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*commonpb.KeyValue, 0, len(attrs))
|
||||
for _, kv := range attrs {
|
||||
out = append(out, KeyValue(kv))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Iterator transforms an attribute iterator into OTLP key-values.
|
||||
func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
|
||||
l := iter.Len()
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*commonpb.KeyValue, 0, l)
|
||||
for iter.Next() {
|
||||
out = append(out, KeyValue(iter.Attribute()))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ResourceAttributes transforms a Resource OTLP key-values.
|
||||
func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
|
||||
return Iterator(res.Iter())
|
||||
}
|
||||
|
||||
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||
func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
|
||||
return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||
}
|
||||
|
||||
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||
func Value(v attribute.Value) *commonpb.AnyValue {
|
||||
av := new(commonpb.AnyValue)
|
||||
switch v.Type() {
|
||||
case attribute.BOOL:
|
||||
av.Value = &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: v.AsBool(),
|
||||
}
|
||||
case attribute.BOOLSLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: boolSliceValues(v.AsBoolSlice()),
|
||||
},
|
||||
}
|
||||
case attribute.INT64:
|
||||
av.Value = &commonpb.AnyValue_IntValue{
|
||||
IntValue: v.AsInt64(),
|
||||
}
|
||||
case attribute.INT64SLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: int64SliceValues(v.AsInt64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
av.Value = &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v.AsFloat64(),
|
||||
}
|
||||
case attribute.FLOAT64SLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.STRING:
|
||||
av.Value = &commonpb.AnyValue_StringValue{
|
||||
StringValue: v.AsString(),
|
||||
}
|
||||
case attribute.STRINGSLICE:
|
||||
av.Value = &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: stringSliceValues(v.AsStringSlice()),
|
||||
},
|
||||
}
|
||||
default:
|
||||
av.Value = &commonpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
}
|
||||
}
|
||||
return av
|
||||
}
|
||||
|
||||
func boolSliceValues(vals []bool) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func int64SliceValues(vals []int64) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func float64SliceValues(vals []float64) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func stringSliceValues(vals []string) []*commonpb.AnyValue {
|
||||
converted := make([]*commonpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
@@ -1,258 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
type attributeTest struct {
|
||||
attrs []attribute.KeyValue
|
||||
expected []*commonpb.KeyValue
|
||||
}
|
||||
|
||||
func TestAttributes(t *testing.T) {
|
||||
for _, test := range []attributeTest{
|
||||
{nil, nil},
|
||||
{
|
||||
[]attribute.KeyValue{
|
||||
attribute.Int("int to int", 123),
|
||||
attribute.Int64("int64 to int64", 1234567),
|
||||
attribute.Float64("float64 to double", 1.61),
|
||||
attribute.String("string to string", "string"),
|
||||
attribute.Bool("bool to bool", true),
|
||||
},
|
||||
[]*commonpb.KeyValue{
|
||||
{
|
||||
Key: "int to int",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "int64 to int64",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: 1234567,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "float64 to double",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: 1.61,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "string to string",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "bool to bool",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
got := KeyValues(test.attrs)
|
||||
if !assert.Len(t, got, len(test.expected)) {
|
||||
continue
|
||||
}
|
||||
for i, actual := range got {
|
||||
if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok {
|
||||
e, ok := test.expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue)
|
||||
if !ok {
|
||||
t.Errorf("expected AnyValue_DoubleValue, got %T", test.expected[i].Value.Value)
|
||||
continue
|
||||
}
|
||||
if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) {
|
||||
continue
|
||||
}
|
||||
e.DoubleValue = a.DoubleValue
|
||||
}
|
||||
assert.Equal(t, test.expected[i], actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArrayAttributes(t *testing.T) {
|
||||
// Array KeyValue supports only arrays of primitive types:
|
||||
// "bool", "int", "int64",
|
||||
// "float64", "string",
|
||||
for _, test := range []attributeTest{
|
||||
{nil, nil},
|
||||
{
|
||||
[]attribute.KeyValue{
|
||||
{
|
||||
Key: attribute.Key("invalid"),
|
||||
Value: attribute.Value{},
|
||||
},
|
||||
},
|
||||
[]*commonpb.KeyValue{
|
||||
{
|
||||
Key: "invalid",
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
[]attribute.KeyValue{
|
||||
attribute.BoolSlice("bool slice to bool array", []bool{true, false}),
|
||||
attribute.IntSlice("int slice to int64 array", []int{1, 2, 3}),
|
||||
attribute.Int64Slice("int64 slice to int64 array", []int64{1, 2, 3}),
|
||||
attribute.Float64Slice("float64 slice to double array", []float64{1.11, 2.22, 3.33}),
|
||||
attribute.StringSlice("string slice to string array", []string{"foo", "bar", "baz"}),
|
||||
},
|
||||
[]*commonpb.KeyValue{
|
||||
newOTelBoolArray("bool slice to bool array", []bool{true, false}),
|
||||
newOTelIntArray("int slice to int64 array", []int64{1, 2, 3}),
|
||||
newOTelIntArray("int64 slice to int64 array", []int64{1, 2, 3}),
|
||||
newOTelDoubleArray("float64 slice to double array", []float64{1.11, 2.22, 3.33}),
|
||||
newOTelStringArray("string slice to string array", []string{"foo", "bar", "baz"}),
|
||||
},
|
||||
},
|
||||
} {
|
||||
actualArrayAttributes := KeyValues(test.attrs)
|
||||
expectedArrayAttributes := test.expected
|
||||
if !assert.Len(t, actualArrayAttributes, len(expectedArrayAttributes)) {
|
||||
continue
|
||||
}
|
||||
|
||||
for i, actualArrayAttr := range actualArrayAttributes {
|
||||
expectedArrayAttr := expectedArrayAttributes[i]
|
||||
expectedKey, actualKey := expectedArrayAttr.Key, actualArrayAttr.Key
|
||||
if !assert.Equal(t, expectedKey, actualKey) {
|
||||
continue
|
||||
}
|
||||
|
||||
expected := expectedArrayAttr.Value.GetArrayValue()
|
||||
actual := actualArrayAttr.Value.GetArrayValue()
|
||||
if expected == nil {
|
||||
assert.Nil(t, actual)
|
||||
continue
|
||||
}
|
||||
if assert.NotNil(t, actual, "expected not nil for %s", actualKey) {
|
||||
assertExpectedArrayValues(t, expected.Values, actual.Values)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertExpectedArrayValues(t *testing.T, expectedValues, actualValues []*commonpb.AnyValue) {
|
||||
for i, actual := range actualValues {
|
||||
expected := expectedValues[i]
|
||||
if a, ok := actual.Value.(*commonpb.AnyValue_DoubleValue); ok {
|
||||
e, ok := expected.Value.(*commonpb.AnyValue_DoubleValue)
|
||||
if !ok {
|
||||
t.Errorf("expected AnyValue_DoubleValue, got %T", expected.Value)
|
||||
continue
|
||||
}
|
||||
if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) {
|
||||
continue
|
||||
}
|
||||
e.DoubleValue = a.DoubleValue
|
||||
}
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
}
|
||||
|
||||
func newOTelBoolArray(key string, values []bool) *commonpb.KeyValue {
|
||||
arrayValues := []*commonpb.AnyValue{}
|
||||
for _, b := range values {
|
||||
arrayValues = append(arrayValues, &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_BoolValue{
|
||||
BoolValue: b,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return newOTelArray(key, arrayValues)
|
||||
}
|
||||
|
||||
func newOTelIntArray(key string, values []int64) *commonpb.KeyValue {
|
||||
arrayValues := []*commonpb.AnyValue{}
|
||||
|
||||
for _, i := range values {
|
||||
arrayValues = append(arrayValues, &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_IntValue{
|
||||
IntValue: i,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return newOTelArray(key, arrayValues)
|
||||
}
|
||||
|
||||
func newOTelDoubleArray(key string, values []float64) *commonpb.KeyValue {
|
||||
arrayValues := []*commonpb.AnyValue{}
|
||||
|
||||
for _, d := range values {
|
||||
arrayValues = append(arrayValues, &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_DoubleValue{
|
||||
DoubleValue: d,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return newOTelArray(key, arrayValues)
|
||||
}
|
||||
|
||||
func newOTelStringArray(key string, values []string) *commonpb.KeyValue {
|
||||
arrayValues := []*commonpb.AnyValue{}
|
||||
|
||||
for _, s := range values {
|
||||
arrayValues = append(arrayValues, &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_StringValue{
|
||||
StringValue: s,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return newOTelArray(key, arrayValues)
|
||||
}
|
||||
|
||||
func newOTelArray(key string, arrayValues []*commonpb.AnyValue) *commonpb.KeyValue {
|
||||
return &commonpb.KeyValue{
|
||||
Key: key,
|
||||
Value: &commonpb.AnyValue{
|
||||
Value: &commonpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &commonpb.ArrayValue{
|
||||
Values: arrayValues,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -1,437 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package metrictransform provides translations for opentelemetry-go concepts and
|
||||
// structures to otlp structures.
|
||||
package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUnimplementedAgg is returned when a transformation of an unimplemented
|
||||
// aggregator is attempted.
|
||||
ErrUnimplementedAgg = errors.New("unimplemented aggregator")
|
||||
|
||||
// ErrIncompatibleAgg is returned when
|
||||
// aggregation.Kind implies an interface conversion that has
|
||||
// failed.
|
||||
ErrIncompatibleAgg = errors.New("incompatible aggregation type")
|
||||
|
||||
// ErrUnknownValueType is returned when a transformation of an unknown value
|
||||
// is attempted.
|
||||
ErrUnknownValueType = errors.New("invalid value type")
|
||||
|
||||
// ErrContextCanceled is returned when a context cancellation halts a
|
||||
// transformation.
|
||||
ErrContextCanceled = errors.New("context canceled")
|
||||
|
||||
// ErrTransforming is returned when an unexected error is encountered transforming.
|
||||
ErrTransforming = errors.New("transforming failed")
|
||||
)
|
||||
|
||||
// result is the product of transforming Records into OTLP Metrics.
|
||||
type result struct {
|
||||
Metric *metricpb.Metric
|
||||
Err error
|
||||
}
|
||||
|
||||
// toNanos returns the number of nanoseconds since the UNIX epoch.
|
||||
func toNanos(t time.Time) uint64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return uint64(t.UnixNano())
|
||||
}
|
||||
|
||||
// InstrumentationLibraryReader transforms all records contained in a checkpoint into
|
||||
// batched OTLP ResourceMetrics.
|
||||
func InstrumentationLibraryReader(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) {
|
||||
var sms []*metricpb.ScopeMetrics
|
||||
|
||||
err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error {
|
||||
records, errc := source(ctx, temporalitySelector, mr)
|
||||
|
||||
// Start a fixed number of goroutines to transform records.
|
||||
transformed := make(chan result)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(int(numWorkers))
|
||||
for i := uint(0); i < numWorkers; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
transformer(ctx, temporalitySelector, records, transformed)
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(transformed)
|
||||
}()
|
||||
|
||||
// Synchronously collect the transformed records and transmit.
|
||||
ms, err := sink(ctx, transformed)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// source is complete, check for any errors.
|
||||
if err := <-errc; err != nil {
|
||||
return err
|
||||
}
|
||||
if len(ms) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sms = append(sms, &metricpb.ScopeMetrics{
|
||||
Metrics: ms,
|
||||
SchemaUrl: lib.SchemaURL,
|
||||
Scope: &commonpb.InstrumentationScope{
|
||||
Name: lib.Name,
|
||||
Version: lib.Version,
|
||||
},
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if len(sms) == 0 {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rms := &metricpb.ResourceMetrics{
|
||||
Resource: Resource(res),
|
||||
SchemaUrl: res.SchemaURL(),
|
||||
ScopeMetrics: sms,
|
||||
}
|
||||
|
||||
return rms, err
|
||||
}
|
||||
|
||||
// source starts a goroutine that sends each one of the Records yielded by
|
||||
// the Reader on the returned chan. Any error encountered will be sent
|
||||
// on the returned error chan after seeding is complete.
|
||||
func source(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, mr export.Reader) (<-chan export.Record, <-chan error) {
|
||||
errc := make(chan error, 1)
|
||||
out := make(chan export.Record)
|
||||
// Seed records into process.
|
||||
go func() {
|
||||
defer close(out)
|
||||
// No select is needed since errc is buffered.
|
||||
errc <- mr.ForEach(temporalitySelector, func(r export.Record) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ErrContextCanceled
|
||||
case out <- r:
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
return out, errc
|
||||
}
|
||||
|
||||
// transformer transforms records read from the passed in chan into
|
||||
// OTLP Metrics which are sent on the out chan.
|
||||
func transformer(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, in <-chan export.Record, out chan<- result) {
|
||||
for r := range in {
|
||||
m, err := Record(temporalitySelector, r)
|
||||
// Propagate errors, but do not send empty results.
|
||||
if err == nil && m == nil {
|
||||
continue
|
||||
}
|
||||
res := result{
|
||||
Metric: m,
|
||||
Err: err,
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case out <- res:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sink collects transformed Records and batches them.
|
||||
//
|
||||
// Any errors encountered transforming input will be reported with an
|
||||
// ErrTransforming as well as the completed ResourceMetrics. It is up to the
|
||||
// caller to handle any incorrect data in these ResourceMetric.
|
||||
func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) {
|
||||
var errStrings []string
|
||||
|
||||
// Group by the MetricDescriptor.
|
||||
grouped := map[string]*metricpb.Metric{}
|
||||
for res := range in {
|
||||
if res.Err != nil {
|
||||
errStrings = append(errStrings, res.Err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
mID := res.Metric.GetName()
|
||||
m, ok := grouped[mID]
|
||||
if !ok {
|
||||
grouped[mID] = res.Metric
|
||||
continue
|
||||
}
|
||||
// Note: There is extra work happening in this code that can be
|
||||
// improved when the work described in #2119 is completed. The SDK has
|
||||
// a guarantee that no more than one point per period per attribute
|
||||
// set is produced, so this fallthrough should never happen. The final
|
||||
// step of #2119 is to remove all the grouping logic here.
|
||||
switch res.Metric.Data.(type) {
|
||||
case *metricpb.Metric_Gauge:
|
||||
m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...)
|
||||
case *metricpb.Metric_Sum:
|
||||
m.GetSum().DataPoints = append(m.GetSum().DataPoints, res.Metric.GetSum().DataPoints...)
|
||||
case *metricpb.Metric_Histogram:
|
||||
m.GetHistogram().DataPoints = append(m.GetHistogram().DataPoints, res.Metric.GetHistogram().DataPoints...)
|
||||
case *metricpb.Metric_Summary:
|
||||
m.GetSummary().DataPoints = append(m.GetSummary().DataPoints, res.Metric.GetSummary().DataPoints...)
|
||||
default:
|
||||
err := fmt.Sprintf("unsupported metric type: %T", res.Metric.Data)
|
||||
errStrings = append(errStrings, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(grouped) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
ms := make([]*metricpb.Metric, 0, len(grouped))
|
||||
for _, m := range grouped {
|
||||
ms = append(ms, m)
|
||||
}
|
||||
|
||||
// Report any transform errors.
|
||||
if len(errStrings) > 0 {
|
||||
return ms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -"))
|
||||
}
|
||||
return ms, nil
|
||||
}
|
||||
|
||||
// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg
|
||||
// error is returned if the Record Aggregator is not supported.
|
||||
func Record(temporalitySelector aggregation.TemporalitySelector, r export.Record) (*metricpb.Metric, error) {
|
||||
agg := r.Aggregation()
|
||||
switch agg.Kind() {
|
||||
case aggregation.HistogramKind:
|
||||
h, ok := agg.(aggregation.Histogram)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
return histogramPoint(r, temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.HistogramKind), h)
|
||||
|
||||
case aggregation.SumKind:
|
||||
s, ok := agg.(aggregation.Sum)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
sum, err := s.Sum()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sumPoint(r, sum, r.StartTime(), r.EndTime(), temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic())
|
||||
|
||||
case aggregation.LastValueKind:
|
||||
lv, ok := agg.(aggregation.LastValue)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg)
|
||||
}
|
||||
value, tm, err := lv.LastValue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gaugePoint(r, value, time.Time{}, tm)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg)
|
||||
}
|
||||
}
|
||||
|
||||
func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
}
|
||||
|
||||
switch n := desc.NumberKind(); n {
|
||||
case number.Int64Kind:
|
||||
m.Data = &metricpb.Metric_Gauge{
|
||||
Gauge: &metricpb.Gauge{
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: num.CoerceToInt64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case number.Float64Kind:
|
||||
m.Data = &metricpb.Metric_Gauge{
|
||||
Gauge: &metricpb.Gauge{
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: num.CoerceToFloat64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func sdkTemporalityToTemporality(temporality aggregation.Temporality) metricpb.AggregationTemporality {
|
||||
switch temporality {
|
||||
case aggregation.DeltaTemporality:
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
|
||||
case aggregation.CumulativeTemporality:
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
|
||||
}
|
||||
return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED
|
||||
}
|
||||
|
||||
func sumPoint(record export.Record, num number.Number, start, end time.Time, temporality aggregation.Temporality, monotonic bool) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
}
|
||||
|
||||
switch n := desc.NumberKind(); n {
|
||||
case number.Int64Kind:
|
||||
m.Data = &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: monotonic,
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: num.CoerceToInt64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
case number.Float64Kind:
|
||||
m.Data = &metricpb.Metric_Sum{
|
||||
Sum: &metricpb.Sum{
|
||||
IsMonotonic: monotonic,
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.NumberDataPoint{
|
||||
{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: num.CoerceToFloat64(n),
|
||||
},
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(start),
|
||||
TimeUnixNano: toNanos(end),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n)
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) {
|
||||
var buckets aggregation.Buckets
|
||||
if buckets, err = a.Histogram(); err != nil {
|
||||
return
|
||||
}
|
||||
boundaries, counts = buckets.Boundaries, buckets.Counts
|
||||
if len(counts) != len(boundaries)+1 {
|
||||
err = ErrTransforming
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// histogram transforms a Histogram Aggregator into an OTLP Metric.
|
||||
func histogramPoint(record export.Record, temporality aggregation.Temporality, a aggregation.Histogram) (*metricpb.Metric, error) {
|
||||
desc := record.Descriptor()
|
||||
attrs := record.Attributes()
|
||||
boundaries, counts, err := histogramValues(a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count, err := a.Count()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sum, err := a.Sum()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sumFloat64 := sum.CoerceToFloat64(desc.NumberKind())
|
||||
m := &metricpb.Metric{
|
||||
Name: desc.Name(),
|
||||
Description: desc.Description(),
|
||||
Unit: string(desc.Unit()),
|
||||
Data: &metricpb.Metric_Histogram{
|
||||
Histogram: &metricpb.Histogram{
|
||||
AggregationTemporality: sdkTemporalityToTemporality(temporality),
|
||||
DataPoints: []*metricpb.HistogramDataPoint{
|
||||
{
|
||||
Sum: &sumFloat64,
|
||||
Attributes: Iterator(attrs.Iter()),
|
||||
StartTimeUnixNano: toNanos(record.StartTime()),
|
||||
TimeUnixNano: toNanos(record.EndTime()),
|
||||
Count: uint64(count),
|
||||
BucketCounts: counts,
|
||||
ExplicitBounds: boundaries,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
@@ -1,314 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
commonpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Timestamps used in this test:
|
||||
|
||||
intervalStart = time.Now()
|
||||
intervalEnd = intervalStart.Add(time.Hour)
|
||||
)
|
||||
|
||||
const (
|
||||
otelCumulative = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE
|
||||
otelDelta = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
|
||||
)
|
||||
|
||||
func TestStringKeyValues(t *testing.T) {
|
||||
tests := []struct {
|
||||
kvs []attribute.KeyValue
|
||||
expected []*commonpb.KeyValue
|
||||
}{
|
||||
{
|
||||
nil,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[]attribute.KeyValue{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[]attribute.KeyValue{
|
||||
attribute.Bool("true", true),
|
||||
attribute.Int64("one", 1),
|
||||
attribute.Int64("two", 2),
|
||||
attribute.Float64("three", 3),
|
||||
attribute.Int("four", 4),
|
||||
attribute.Int("five", 5),
|
||||
attribute.Float64("six", 6),
|
||||
attribute.Int("seven", 7),
|
||||
attribute.Int("eight", 8),
|
||||
attribute.String("the", "final word"),
|
||||
},
|
||||
[]*commonpb.KeyValue{
|
||||
{Key: "eight", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 8}}},
|
||||
{Key: "five", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 5}}},
|
||||
{Key: "four", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 4}}},
|
||||
{Key: "one", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 1}}},
|
||||
{Key: "seven", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 7}}},
|
||||
{Key: "six", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 6.0}}},
|
||||
{Key: "the", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "final word"}}},
|
||||
{Key: "three", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 3.0}}},
|
||||
{Key: "true", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_BoolValue{BoolValue: true}}},
|
||||
{Key: "two", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 2}}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
attrs := attribute.NewSet(test.kvs...)
|
||||
assert.Equal(t, test.expected, Iterator(attrs.Iter()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestSumIntDataPoints(t *testing.T) {
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
attrs := attribute.NewSet(attribute.String("one", "1"))
|
||||
sums := sum.New(2)
|
||||
s, ckpt := &sums[0], &sums[1]
|
||||
|
||||
assert.NoError(t, s.Update(context.Background(), number.Number(1), &desc))
|
||||
require.NoError(t, s.SynchronizedMove(ckpt, &desc))
|
||||
record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd)
|
||||
|
||||
value, err := ckpt.Sum()
|
||||
require.NoError(t, err)
|
||||
|
||||
if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true); assert.NoError(t, err) {
|
||||
assert.Nil(t, m.GetGauge())
|
||||
assert.Equal(t, &metricpb.Sum{
|
||||
AggregationTemporality: otelCumulative,
|
||||
IsMonotonic: true,
|
||||
DataPoints: []*metricpb.NumberDataPoint{{
|
||||
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
|
||||
TimeUnixNano: uint64(intervalEnd.UnixNano()),
|
||||
Attributes: []*commonpb.KeyValue{
|
||||
{
|
||||
Key: "one",
|
||||
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
|
||||
},
|
||||
},
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: 1,
|
||||
},
|
||||
}},
|
||||
}, m.GetSum())
|
||||
assert.Nil(t, m.GetHistogram())
|
||||
assert.Nil(t, m.GetSummary())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSumFloatDataPoints(t *testing.T) {
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind)
|
||||
attrs := attribute.NewSet(attribute.String("one", "1"))
|
||||
sums := sum.New(2)
|
||||
s, ckpt := &sums[0], &sums[1]
|
||||
|
||||
assert.NoError(t, s.Update(context.Background(), number.NewFloat64Number(1), &desc))
|
||||
require.NoError(t, s.SynchronizedMove(ckpt, &desc))
|
||||
record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd)
|
||||
value, err := ckpt.Sum()
|
||||
require.NoError(t, err)
|
||||
|
||||
if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.DeltaTemporality, false); assert.NoError(t, err) {
|
||||
assert.Nil(t, m.GetGauge())
|
||||
assert.Equal(t, &metricpb.Sum{
|
||||
IsMonotonic: false,
|
||||
AggregationTemporality: otelDelta,
|
||||
DataPoints: []*metricpb.NumberDataPoint{{
|
||||
Value: &metricpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: 1.0,
|
||||
},
|
||||
StartTimeUnixNano: uint64(intervalStart.UnixNano()),
|
||||
TimeUnixNano: uint64(intervalEnd.UnixNano()),
|
||||
Attributes: []*commonpb.KeyValue{
|
||||
{
|
||||
Key: "one",
|
||||
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
|
||||
},
|
||||
},
|
||||
}}}, m.GetSum())
|
||||
assert.Nil(t, m.GetHistogram())
|
||||
assert.Nil(t, m.GetSummary())
|
||||
}
|
||||
}
|
||||
|
||||
func TestLastValueIntDataPoints(t *testing.T) {
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind)
|
||||
attrs := attribute.NewSet(attribute.String("one", "1"))
|
||||
lvs := lastvalue.New(2)
|
||||
lv, ckpt := &lvs[0], &lvs[1]
|
||||
|
||||
assert.NoError(t, lv.Update(context.Background(), number.Number(100), &desc))
|
||||
require.NoError(t, lv.SynchronizedMove(ckpt, &desc))
|
||||
record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd)
|
||||
value, timestamp, err := ckpt.LastValue()
|
||||
require.NoError(t, err)
|
||||
|
||||
if m, err := gaugePoint(record, value, time.Time{}, timestamp); assert.NoError(t, err) {
|
||||
assert.Equal(t, []*metricpb.NumberDataPoint{{
|
||||
StartTimeUnixNano: 0,
|
||||
TimeUnixNano: uint64(timestamp.UnixNano()),
|
||||
Attributes: []*commonpb.KeyValue{
|
||||
{
|
||||
Key: "one",
|
||||
Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}},
|
||||
},
|
||||
},
|
||||
Value: &metricpb.NumberDataPoint_AsInt{
|
||||
AsInt: 100,
|
||||
},
|
||||
}}, m.GetGauge().DataPoints)
|
||||
assert.Nil(t, m.GetSum())
|
||||
assert.Nil(t, m.GetHistogram())
|
||||
assert.Nil(t, m.GetSummary())
|
||||
}
|
||||
}
|
||||
|
||||
func TestSumErrUnknownValueType(t *testing.T) {
|
||||
desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1))
|
||||
attrs := attribute.NewSet()
|
||||
s := &sum.New(1)[0]
|
||||
record := export.NewRecord(&desc, &attrs, s, intervalStart, intervalEnd)
|
||||
value, err := s.Sum()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true)
|
||||
assert.Error(t, err)
|
||||
if !errors.Is(err, ErrUnknownValueType) {
|
||||
t.Errorf("expected ErrUnknownValueType, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type testAgg struct {
|
||||
kind aggregation.Kind
|
||||
agg aggregation.Aggregation
|
||||
}
|
||||
|
||||
func (t *testAgg) Kind() aggregation.Kind {
|
||||
return t.kind
|
||||
}
|
||||
|
||||
func (t *testAgg) Aggregation() aggregation.Aggregation {
|
||||
return t.agg
|
||||
}
|
||||
|
||||
// None of these three are used:
|
||||
|
||||
func (t *testAgg) Update(context.Context, number.Number, *sdkapi.Descriptor) error {
|
||||
return nil
|
||||
}
|
||||
func (t *testAgg) SynchronizedMove(aggregator.Aggregator, *sdkapi.Descriptor) error {
|
||||
return nil
|
||||
}
|
||||
func (t *testAgg) Merge(aggregator.Aggregator, *sdkapi.Descriptor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type testErrSum struct {
|
||||
err error
|
||||
}
|
||||
|
||||
type testErrLastValue struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (te *testErrLastValue) LastValue() (number.Number, time.Time, error) {
|
||||
return 0, time.Time{}, te.err
|
||||
}
|
||||
func (te *testErrLastValue) Kind() aggregation.Kind {
|
||||
return aggregation.LastValueKind
|
||||
}
|
||||
|
||||
func (te *testErrSum) Sum() (number.Number, error) {
|
||||
return 0, te.err
|
||||
}
|
||||
func (te *testErrSum) Kind() aggregation.Kind {
|
||||
return aggregation.SumKind
|
||||
}
|
||||
|
||||
var _ aggregator.Aggregator = &testAgg{}
|
||||
var _ aggregation.Aggregation = &testAgg{}
|
||||
var _ aggregation.Sum = &testErrSum{}
|
||||
var _ aggregation.LastValue = &testErrLastValue{}
|
||||
|
||||
func TestRecordAggregatorIncompatibleErrors(t *testing.T) {
|
||||
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
|
||||
desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
attrs := attribute.NewSet()
|
||||
test := &testAgg{
|
||||
kind: kind,
|
||||
agg: agg,
|
||||
}
|
||||
return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &attrs, test, intervalStart, intervalEnd))
|
||||
}
|
||||
|
||||
mpb, err := makeMpb(aggregation.SumKind, &lastvalue.New(1)[0])
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, mpb)
|
||||
require.True(t, errors.Is(err, ErrIncompatibleAgg))
|
||||
|
||||
mpb, err = makeMpb(aggregation.LastValueKind, &sum.New(1)[0])
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, mpb)
|
||||
require.True(t, errors.Is(err, ErrIncompatibleAgg))
|
||||
}
|
||||
|
||||
func TestRecordAggregatorUnexpectedErrors(t *testing.T) {
|
||||
makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) {
|
||||
desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind)
|
||||
attrs := attribute.NewSet()
|
||||
return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &attrs, agg, intervalStart, intervalEnd))
|
||||
}
|
||||
|
||||
errEx := fmt.Errorf("timeout")
|
||||
|
||||
mpb, err := makeMpb(aggregation.SumKind, &testErrSum{errEx})
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, mpb)
|
||||
require.True(t, errors.Is(err, errEx))
|
||||
|
||||
mpb, err = makeMpb(aggregation.LastValueKind, &testErrLastValue{errEx})
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, mpb)
|
||||
require.True(t, errors.Is(err, errEx))
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metrictransform
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
func TestNilResource(t *testing.T) {
|
||||
assert.Empty(t, Resource(nil))
|
||||
}
|
||||
|
||||
func TestEmptyResource(t *testing.T) {
|
||||
assert.Empty(t, Resource(&resource.Resource{}))
|
||||
}
|
||||
|
||||
/*
|
||||
* This does not include any testing on the ordering of Resource Attributes.
|
||||
* They are stored as a map internally to the Resource and their order is not
|
||||
* guaranteed.
|
||||
*/
|
||||
|
||||
func TestResourceAttributes(t *testing.T) {
|
||||
attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)}
|
||||
|
||||
got := Resource(resource.NewSchemaless(attrs...)).GetAttributes()
|
||||
if !assert.Len(t, attrs, 2) {
|
||||
return
|
||||
}
|
||||
assert.ElementsMatch(t, KeyValues(attrs), got)
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig_test
|
||||
package oconf_test
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -64,25 +64,25 @@ func (f *fileReader) readFile(filename string) ([]byte, error) {
|
||||
}
|
||||
|
||||
func TestConfigs(t *testing.T) {
|
||||
tlsCert, err := otlpconfig.CreateTLSConfig([]byte(WeakCertificate))
|
||||
tlsCert, err := oconf.CreateTLSConfig([]byte(WeakCertificate))
|
||||
assert.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
opts []otlpconfig.GenericOption
|
||||
opts []oconf.GenericOption
|
||||
env env
|
||||
fileReader fileReader
|
||||
asserts func(t *testing.T, c *otlpconfig.Config, grpcOption bool)
|
||||
asserts func(t *testing.T, c *oconf.Config, grpcOption bool)
|
||||
}{
|
||||
{
|
||||
name: "Test default configs",
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
assert.Equal(t, "localhost:4317", c.Metrics.Endpoint)
|
||||
} else {
|
||||
assert.Equal(t, "localhost:4318", c.Metrics.Endpoint)
|
||||
}
|
||||
assert.Equal(t, otlpconfig.NoCompression, c.Metrics.Compression)
|
||||
assert.Equal(t, oconf.NoCompression, c.Metrics.Compression)
|
||||
assert.Equal(t, map[string]string(nil), c.Metrics.Headers)
|
||||
assert.Equal(t, 10*time.Second, c.Metrics.Timeout)
|
||||
},
|
||||
@@ -91,10 +91,10 @@ func TestConfigs(t *testing.T) {
|
||||
// Endpoint Tests
|
||||
{
|
||||
name: "Test With Endpoint",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithEndpoint("someendpoint"),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithEndpoint("someendpoint"),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "someendpoint", c.Metrics.Endpoint)
|
||||
},
|
||||
},
|
||||
@@ -103,7 +103,7 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.False(t, c.Metrics.Insecure)
|
||||
if grpcOption {
|
||||
assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint)
|
||||
@@ -119,7 +119,7 @@ func TestConfigs(t *testing.T) {
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var",
|
||||
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.True(t, c.Metrics.Insecure)
|
||||
assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint)
|
||||
if !grpcOption {
|
||||
@@ -129,13 +129,13 @@ func TestConfigs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Test Mixed Environment and With Endpoint",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithEndpoint("metrics_endpoint"),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithEndpoint("metrics_endpoint"),
|
||||
},
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint)
|
||||
},
|
||||
},
|
||||
@@ -144,7 +144,7 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||
assert.Equal(t, true, c.Metrics.Insecure)
|
||||
},
|
||||
@@ -154,7 +154,7 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||
assert.Equal(t, true, c.Metrics.Insecure)
|
||||
},
|
||||
@@ -164,7 +164,7 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
|
||||
assert.Equal(t, false, c.Metrics.Insecure)
|
||||
},
|
||||
@@ -175,7 +175,7 @@ func TestConfigs(t *testing.T) {
|
||||
"OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific",
|
||||
"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
|
||||
assert.Equal(t, true, c.Metrics.Insecure)
|
||||
},
|
||||
@@ -184,7 +184,7 @@ func TestConfigs(t *testing.T) {
|
||||
// Certificate tests
|
||||
{
|
||||
name: "Test Default Certificate",
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||
} else {
|
||||
@@ -194,10 +194,10 @@ func TestConfigs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Test With Certificate",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithTLSClientConfig(tlsCert),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithTLSClientConfig(tlsCert),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
//TODO: make sure gRPC's credentials actually works
|
||||
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||
@@ -215,7 +215,7 @@ func TestConfigs(t *testing.T) {
|
||||
fileReader: fileReader{
|
||||
"cert_path": []byte(WeakCertificate),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||
} else {
|
||||
@@ -234,7 +234,7 @@ func TestConfigs(t *testing.T) {
|
||||
"cert_path": []byte(WeakCertificate),
|
||||
"invalid_cert": []byte("invalid certificate file."),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||
} else {
|
||||
@@ -245,14 +245,14 @@ func TestConfigs(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "Test Mixed Environment and With Certificate",
|
||||
opts: []otlpconfig.GenericOption{},
|
||||
opts: []oconf.GenericOption{},
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
|
||||
},
|
||||
fileReader: fileReader{
|
||||
"cert_path": []byte(WeakCertificate),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
if grpcOption {
|
||||
assert.NotNil(t, c.Metrics.GRPCCredentials)
|
||||
} else {
|
||||
@@ -265,17 +265,17 @@ func TestConfigs(t *testing.T) {
|
||||
// Headers tests
|
||||
{
|
||||
name: "Test With Headers",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithHeaders(map[string]string{"h1": "v1"}),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithHeaders(map[string]string{"h1": "v1"}),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Environment Headers",
|
||||
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||
},
|
||||
},
|
||||
@@ -285,17 +285,17 @@ func TestConfigs(t *testing.T) {
|
||||
"OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific",
|
||||
"OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mixed Environment and With Headers",
|
||||
env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithHeaders(map[string]string{"m1": "mv1"}),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithHeaders(map[string]string{"m1": "mv1"}),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers)
|
||||
},
|
||||
},
|
||||
@@ -303,11 +303,11 @@ func TestConfigs(t *testing.T) {
|
||||
// Compression Tests
|
||||
{
|
||||
name: "Test With Compression",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithCompression(otlpconfig.GzipCompression),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithCompression(oconf.GzipCompression),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -315,8 +315,8 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -324,30 +324,30 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression)
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Test Mixed Environment and With Compression",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithCompression(otlpconfig.NoCompression),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithCompression(oconf.NoCompression),
|
||||
},
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
assert.Equal(t, otlpconfig.NoCompression, c.Metrics.Compression)
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, oconf.NoCompression, c.Metrics.Compression)
|
||||
},
|
||||
},
|
||||
|
||||
// Timeout Tests
|
||||
{
|
||||
name: "Test With Timeout",
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithTimeout(time.Duration(5 * time.Second)),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithTimeout(time.Duration(5 * time.Second)),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, 5*time.Second, c.Metrics.Timeout)
|
||||
},
|
||||
},
|
||||
@@ -356,7 +356,7 @@ func TestConfigs(t *testing.T) {
|
||||
env: map[string]string{
|
||||
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, c.Metrics.Timeout, 15*time.Second)
|
||||
},
|
||||
},
|
||||
@@ -366,7 +366,7 @@ func TestConfigs(t *testing.T) {
|
||||
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, c.Metrics.Timeout, 28*time.Second)
|
||||
},
|
||||
},
|
||||
@@ -376,10 +376,10 @@ func TestConfigs(t *testing.T) {
|
||||
"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
|
||||
"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
|
||||
},
|
||||
opts: []otlpconfig.GenericOption{
|
||||
otlpconfig.WithTimeout(5 * time.Second),
|
||||
opts: []oconf.GenericOption{
|
||||
oconf.WithTimeout(5 * time.Second),
|
||||
},
|
||||
asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) {
|
||||
asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
|
||||
assert.Equal(t, c.Metrics.Timeout, 5*time.Second)
|
||||
},
|
||||
},
|
||||
@@ -387,37 +387,37 @@ func TestConfigs(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
origEOR := otlpconfig.DefaultEnvOptionsReader
|
||||
otlpconfig.DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
origEOR := oconf.DefaultEnvOptionsReader
|
||||
oconf.DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
GetEnv: tt.env.getEnv,
|
||||
ReadFile: tt.fileReader.readFile,
|
||||
Namespace: "OTEL_EXPORTER_OTLP",
|
||||
}
|
||||
t.Cleanup(func() { otlpconfig.DefaultEnvOptionsReader = origEOR })
|
||||
t.Cleanup(func() { oconf.DefaultEnvOptionsReader = origEOR })
|
||||
|
||||
// Tests Generic options as HTTP Options
|
||||
cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(tt.opts)...)
|
||||
cfg := oconf.NewHTTPConfig(asHTTPOptions(tt.opts)...)
|
||||
tt.asserts(t, &cfg, false)
|
||||
|
||||
// Tests Generic options as gRPC Options
|
||||
cfg = otlpconfig.NewGRPCConfig(asGRPCOptions(tt.opts)...)
|
||||
cfg = oconf.NewGRPCConfig(asGRPCOptions(tt.opts)...)
|
||||
tt.asserts(t, &cfg, true)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func asHTTPOptions(opts []otlpconfig.GenericOption) []otlpconfig.HTTPOption {
|
||||
converted := make([]otlpconfig.HTTPOption, len(opts))
|
||||
func asHTTPOptions(opts []oconf.GenericOption) []oconf.HTTPOption {
|
||||
converted := make([]oconf.HTTPOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = otlpconfig.NewHTTPOption(o.ApplyHTTPOption)
|
||||
converted[i] = oconf.NewHTTPOption(o.ApplyHTTPOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func asGRPCOptions(opts []otlpconfig.GenericOption) []otlpconfig.GRPCOption {
|
||||
converted := make([]otlpconfig.GRPCOption, len(opts))
|
||||
func asGRPCOptions(opts []oconf.GenericOption) []oconf.GRPCOption {
|
||||
converted := make([]oconf.GRPCOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = otlpconfig.NewGRPCOption(o.ApplyGRPCOption)
|
||||
converted[i] = oconf.NewGRPCOption(o.ApplyGRPCOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import "time"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
254
exporters/otlp/otlpmetric/internal/otest/client.go
Normal file
254
exporters/otlp/otlpmetric/internal/otest/client.go
Normal file
@@ -0,0 +1,254 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
// Sat Jan 01 2000 00:00:00 GMT+0000.
|
||||
start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
|
||||
end = start.Add(30 * time.Second)
|
||||
|
||||
kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
|
||||
}}
|
||||
kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
|
||||
}}
|
||||
kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
|
||||
}}
|
||||
kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
|
||||
}}
|
||||
|
||||
min, max, sum = 2.0, 4.0, 90.0
|
||||
hdp = []*mpb.HistogramDataPoint{{
|
||||
Attributes: []*cpb.KeyValue{kvAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Count: 30,
|
||||
Sum: &sum,
|
||||
ExplicitBounds: []float64{1, 5},
|
||||
BucketCounts: []uint64{0, 30, 0},
|
||||
Min: &min,
|
||||
Max: &max,
|
||||
}}
|
||||
|
||||
hist = &mpb.Histogram{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||
DataPoints: hdp,
|
||||
}
|
||||
|
||||
dPtsInt64 = []*mpb.NumberDataPoint{
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{kvAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsInt{AsInt: 1},
|
||||
},
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{kvBob},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsInt{AsInt: 2},
|
||||
},
|
||||
}
|
||||
dPtsFloat64 = []*mpb.NumberDataPoint{
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{kvAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
|
||||
},
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{kvBob},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
|
||||
},
|
||||
}
|
||||
|
||||
sumInt64 = &mpb.Sum{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
IsMonotonic: true,
|
||||
DataPoints: dPtsInt64,
|
||||
}
|
||||
sumFloat64 = &mpb.Sum{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||
IsMonotonic: false,
|
||||
DataPoints: dPtsFloat64,
|
||||
}
|
||||
|
||||
gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64}
|
||||
gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64}
|
||||
|
||||
metrics = []*mpb.Metric{
|
||||
{
|
||||
Name: "int64-gauge",
|
||||
Description: "Gauge with int64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Gauge{Gauge: gaugeInt64},
|
||||
},
|
||||
{
|
||||
Name: "float64-gauge",
|
||||
Description: "Gauge with float64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64},
|
||||
},
|
||||
{
|
||||
Name: "int64-sum",
|
||||
Description: "Sum with int64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Sum{Sum: sumInt64},
|
||||
},
|
||||
{
|
||||
Name: "float64-sum",
|
||||
Description: "Sum with float64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Sum{Sum: sumFloat64},
|
||||
},
|
||||
{
|
||||
Name: "histogram",
|
||||
Description: "Histogram",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Histogram{Histogram: hist},
|
||||
},
|
||||
}
|
||||
|
||||
scope = &cpb.InstrumentationScope{
|
||||
Name: "test/code/path",
|
||||
Version: "v0.1.0",
|
||||
}
|
||||
scopeMetrics = []*mpb.ScopeMetrics{{
|
||||
Scope: scope,
|
||||
Metrics: metrics,
|
||||
SchemaUrl: semconv.SchemaURL,
|
||||
}}
|
||||
|
||||
res = &rpb.Resource{
|
||||
Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer},
|
||||
}
|
||||
resourceMetrics = &mpb.ResourceMetrics{
|
||||
Resource: res,
|
||||
ScopeMetrics: scopeMetrics,
|
||||
SchemaUrl: semconv.SchemaURL,
|
||||
}
|
||||
)
|
||||
|
||||
// ClientFactory is a function that when called returns a
|
||||
// otlpmetric.Client implementation that is connected to also returned
|
||||
// Collector implementation. The Client is ready to upload metric data to the
|
||||
// Collector which is ready to store that data.
|
||||
type ClientFactory func() (otlpmetric.Client, Collector)
|
||||
|
||||
// RunClientTests runs a suite of Client integration tests. For example:
|
||||
//
|
||||
// t.Run("Integration", RunClientTests(factory))
|
||||
func RunClientTests(f ClientFactory) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
t.Run("ClientHonorsContextErrors", func(t *testing.T) {
|
||||
t.Run("Shutdown", testCtxErrs(func() func(context.Context) error {
|
||||
c, _ := f()
|
||||
return c.Shutdown
|
||||
}))
|
||||
|
||||
t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error {
|
||||
c, _ := f()
|
||||
return c.ForceFlush
|
||||
}))
|
||||
|
||||
t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error {
|
||||
c, _ := f()
|
||||
return func(ctx context.Context) error {
|
||||
return c.UploadMetrics(ctx, nil)
|
||||
}
|
||||
}))
|
||||
})
|
||||
|
||||
t.Run("ForceFlushFlushes", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client, collector := f()
|
||||
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||
|
||||
require.NoError(t, client.ForceFlush(ctx))
|
||||
rm := collector.Collect().Dump()
|
||||
// Data correctness is not important, just it was received.
|
||||
require.Greater(t, len(rm), 0, "no data uploaded")
|
||||
|
||||
require.NoError(t, client.Shutdown(ctx))
|
||||
rm = collector.Collect().Dump()
|
||||
assert.Len(t, rm, 0, "client did not flush all data")
|
||||
})
|
||||
|
||||
t.Run("UploadMetrics", func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client, coll := f()
|
||||
|
||||
require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
|
||||
require.NoError(t, client.Shutdown(ctx))
|
||||
got := coll.Collect().Dump()
|
||||
require.Len(t, got, 1, "upload of one ResourceMetrics")
|
||||
diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal))
|
||||
if diff != "" {
|
||||
t.Fatalf("unexpected ResourceMetrics:\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
t.Run("DeadlineExceeded", func(t *testing.T) {
|
||||
innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond)
|
||||
t.Cleanup(innerCancel)
|
||||
<-innerCtx.Done()
|
||||
|
||||
f := factory()
|
||||
assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded)
|
||||
})
|
||||
|
||||
t.Run("Canceled", func(t *testing.T) {
|
||||
innerCtx, innerCancel := context.WithCancel(ctx)
|
||||
innerCancel()
|
||||
|
||||
f := factory()
|
||||
assert.ErrorIs(t, f(innerCtx), context.Canceled)
|
||||
})
|
||||
}
|
||||
}
|
||||
54
exporters/otlp/otlpmetric/internal/otest/client_test.go
Normal file
54
exporters/otlp/otlpmetric/internal/otest/client_test.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
type client struct {
|
||||
storage *Storage
|
||||
}
|
||||
|
||||
func (c *client) Collect() *Storage {
|
||||
return c.storage
|
||||
}
|
||||
|
||||
func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error {
|
||||
c.storage.Add(&cpb.ExportMetricsServiceRequest{
|
||||
ResourceMetrics: []*mpb.ResourceMetrics{rm},
|
||||
})
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
|
||||
func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() }
|
||||
|
||||
func TestClientTests(t *testing.T) {
|
||||
factory := func() (otlpmetric.Client, Collector) {
|
||||
c := &client{storage: NewStorage()}
|
||||
return c, c
|
||||
}
|
||||
|
||||
t.Run("Integration", RunClientTests(factory))
|
||||
}
|
||||
428
exporters/otlp/otlpmetric/internal/otest/collector.go
Normal file
428
exporters/otlp/otlpmetric/internal/otest/collector.go
Normal file
@@ -0,0 +1,428 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix" // nolint:depguard // This is for testing.
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Collector is the collection target a Client sends metric uploads to.
|
||||
type Collector interface {
|
||||
Collect() *Storage
|
||||
}
|
||||
|
||||
// Storage stores uploaded OTLP metric data in their proto form.
|
||||
type Storage struct {
|
||||
dataMu sync.Mutex
|
||||
data []*mpb.ResourceMetrics
|
||||
}
|
||||
|
||||
// NewStorage returns a configure storage ready to store received requests.
|
||||
func NewStorage() *Storage {
|
||||
return &Storage{}
|
||||
}
|
||||
|
||||
// Add adds the request to the Storage.
|
||||
func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) {
|
||||
s.dataMu.Lock()
|
||||
defer s.dataMu.Unlock()
|
||||
s.data = append(s.data, request.ResourceMetrics...)
|
||||
}
|
||||
|
||||
// Dump returns all added ResourceMetrics and clears the storage.
|
||||
func (s *Storage) Dump() []*mpb.ResourceMetrics {
|
||||
s.dataMu.Lock()
|
||||
defer s.dataMu.Unlock()
|
||||
|
||||
var data []*mpb.ResourceMetrics
|
||||
data, s.data = s.data, []*mpb.ResourceMetrics{}
|
||||
return data
|
||||
}
|
||||
|
||||
// GRPCCollector is an OTLP gRPC server that collects all requests it receives.
|
||||
type GRPCCollector struct {
|
||||
collpb.UnimplementedMetricsServiceServer
|
||||
|
||||
headersMu sync.Mutex
|
||||
headers metadata.MD
|
||||
storage *Storage
|
||||
|
||||
errCh <-chan error
|
||||
listener net.Listener
|
||||
srv *grpc.Server
|
||||
}
|
||||
|
||||
// NewGRPCCollector returns a *GRPCCollector that is listening at the provided
|
||||
// endpoint.
|
||||
//
|
||||
// If endpoint is an empty string, the returned collector will be listeing on
|
||||
// the localhost interface at an OS chosen port.
|
||||
//
|
||||
// If errCh is not nil, the collector will respond to Export calls with errors
|
||||
// sent on that channel. This means that if errCh is not nil Export calls will
|
||||
// block until an error is received.
|
||||
func NewGRPCCollector(endpoint string, errCh <-chan error) (*GRPCCollector, error) {
|
||||
if endpoint == "" {
|
||||
endpoint = "localhost:0"
|
||||
}
|
||||
|
||||
c := &GRPCCollector{
|
||||
storage: NewStorage(),
|
||||
errCh: errCh,
|
||||
}
|
||||
|
||||
var err error
|
||||
c.listener, err = net.Listen("tcp", endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.srv = grpc.NewServer()
|
||||
collpb.RegisterMetricsServiceServer(c.srv, c)
|
||||
go func() { _ = c.srv.Serve(c.listener) }()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Shutdown shuts down the gRPC server closing all open connections and
|
||||
// listeners immediately.
|
||||
func (c *GRPCCollector) Shutdown() { c.srv.Stop() }
|
||||
|
||||
// Addr returns the net.Addr c is listening at.
|
||||
func (c *GRPCCollector) Addr() net.Addr {
|
||||
return c.listener.Addr()
|
||||
}
|
||||
|
||||
// Collect returns the Storage holding all collected requests.
|
||||
func (c *GRPCCollector) Collect() *Storage {
|
||||
return c.storage
|
||||
}
|
||||
|
||||
// Headers returns the headers received for all requests.
|
||||
func (c *GRPCCollector) Headers() map[string][]string {
|
||||
// Makes a copy.
|
||||
c.headersMu.Lock()
|
||||
defer c.headersMu.Unlock()
|
||||
return metadata.Join(c.headers)
|
||||
}
|
||||
|
||||
// Export handles the export req.
|
||||
func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) {
|
||||
c.storage.Add(req)
|
||||
|
||||
if h, ok := metadata.FromIncomingContext(ctx); ok {
|
||||
c.headersMu.Lock()
|
||||
c.headers = metadata.Join(c.headers, h)
|
||||
c.headersMu.Unlock()
|
||||
}
|
||||
|
||||
var err error
|
||||
if c.errCh != nil {
|
||||
err = <-c.errCh
|
||||
}
|
||||
return &collpb.ExportMetricsServiceResponse{}, err
|
||||
}
|
||||
|
||||
var emptyExportMetricsServiceResponse = func() []byte {
|
||||
body := collpb.ExportMetricsServiceResponse{}
|
||||
r, err := proto.Marshal(&body)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return r
|
||||
}()
|
||||
|
||||
type HTTPResponseError struct {
|
||||
Err error
|
||||
Status int
|
||||
Header http.Header
|
||||
}
|
||||
|
||||
func (e *HTTPResponseError) Error() string {
|
||||
return fmt.Sprintf("%d: %s", e.Status, e.Err)
|
||||
}
|
||||
|
||||
func (e *HTTPResponseError) Unwrap() error { return e.Err }
|
||||
|
||||
// HTTPCollector is an OTLP HTTP server that collects all requests it receives.
|
||||
type HTTPCollector struct {
|
||||
headersMu sync.Mutex
|
||||
headers http.Header
|
||||
storage *Storage
|
||||
|
||||
errCh <-chan error
|
||||
listener net.Listener
|
||||
srv *http.Server
|
||||
}
|
||||
|
||||
// NewHTTPCollector returns a *HTTPCollector that is listening at the provided
|
||||
// endpoint.
|
||||
//
|
||||
// If endpoint is an empty string, the returned collector will be listeing on
|
||||
// the localhost interface at an OS chosen port, not use TLS, and listen at the
|
||||
// default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains
|
||||
// a prefix of "https" the server will generate weak self-signed TLS
|
||||
// certificates and use them to server data. If the endpoint contains a path,
|
||||
// that path will be used instead of the default OTLP metric endpoint path.
|
||||
//
|
||||
// If errCh is not nil, the collector will respond to HTTP requests with errors
|
||||
// sent on that channel. This means that if errCh is not nil Export calls will
|
||||
// block until an error is received.
|
||||
func NewHTTPCollector(endpoint string, errCh <-chan error) (*HTTPCollector, error) {
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if u.Host == "" {
|
||||
u.Host = "localhost:0"
|
||||
}
|
||||
if u.Path == "" {
|
||||
u.Path = oconf.DefaultMetricsPath
|
||||
}
|
||||
|
||||
c := &HTTPCollector{
|
||||
headers: http.Header{},
|
||||
storage: NewStorage(),
|
||||
errCh: errCh,
|
||||
}
|
||||
|
||||
c.listener, err = net.Listen("tcp", u.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle(u.Path, http.HandlerFunc(c.handler))
|
||||
c.srv = &http.Server{Handler: mux}
|
||||
if u.Scheme == "https" {
|
||||
cert, err := weakCertificate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.srv.TLSConfig = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
go func() { _ = c.srv.ServeTLS(c.listener, "", "") }()
|
||||
} else {
|
||||
go func() { _ = c.srv.Serve(c.listener) }()
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Shutdown shuts down the HTTP server closing all open connections and
|
||||
// listeners.
|
||||
func (c *HTTPCollector) Shutdown(ctx context.Context) error {
|
||||
return c.srv.Shutdown(ctx)
|
||||
}
|
||||
|
||||
// Addr returns the net.Addr c is listening at.
|
||||
func (c *HTTPCollector) Addr() net.Addr {
|
||||
return c.listener.Addr()
|
||||
}
|
||||
|
||||
// Collect returns the Storage holding all collected requests.
|
||||
func (c *HTTPCollector) Collect() *Storage {
|
||||
return c.storage
|
||||
}
|
||||
|
||||
// Headers returns the headers received for all requests.
|
||||
func (c *HTTPCollector) Headers() map[string][]string {
|
||||
// Makes a copy.
|
||||
c.headersMu.Lock()
|
||||
defer c.headersMu.Unlock()
|
||||
return c.headers.Clone()
|
||||
}
|
||||
|
||||
func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) {
|
||||
c.respond(w, c.record(r))
|
||||
}
|
||||
|
||||
func (c *HTTPCollector) record(r *http.Request) error {
|
||||
// Currently only supports protobuf.
|
||||
if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" {
|
||||
return fmt.Errorf("content-type not supported: %s", v)
|
||||
}
|
||||
|
||||
body, err := c.readBody(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pbRequest := &collpb.ExportMetricsServiceRequest{}
|
||||
err = proto.Unmarshal(body, pbRequest)
|
||||
if err != nil {
|
||||
return &HTTPResponseError{
|
||||
Err: err,
|
||||
Status: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
c.storage.Add(pbRequest)
|
||||
|
||||
c.headersMu.Lock()
|
||||
for k, vals := range r.Header {
|
||||
for _, v := range vals {
|
||||
c.headers.Add(k, v)
|
||||
}
|
||||
}
|
||||
c.headersMu.Unlock()
|
||||
|
||||
if c.errCh != nil {
|
||||
err = <-c.errCh
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) {
|
||||
var reader io.ReadCloser
|
||||
switch r.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
reader, err = gzip.NewReader(r.Body)
|
||||
if err != nil {
|
||||
_ = reader.Close()
|
||||
return nil, &HTTPResponseError{
|
||||
Err: err,
|
||||
Status: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
default:
|
||||
reader = r.Body
|
||||
}
|
||||
|
||||
defer func() {
|
||||
cErr := reader.Close()
|
||||
if err == nil && cErr != nil {
|
||||
err = &HTTPResponseError{
|
||||
Err: cErr,
|
||||
Status: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
}()
|
||||
body, err = io.ReadAll(reader)
|
||||
if err != nil {
|
||||
err = &HTTPResponseError{
|
||||
Err: err,
|
||||
Status: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
return body, err
|
||||
}
|
||||
|
||||
func (c *HTTPCollector) respond(w http.ResponseWriter, err error) {
|
||||
if err != nil {
|
||||
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
var e *HTTPResponseError
|
||||
if errors.As(err, &e) {
|
||||
for k, vals := range e.Header {
|
||||
for _, v := range vals {
|
||||
w.Header().Add(k, v)
|
||||
}
|
||||
}
|
||||
w.WriteHeader(e.Status)
|
||||
fmt.Fprintln(w, e.Error())
|
||||
} else {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
fmt.Fprintln(w, err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/x-protobuf")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write(emptyExportMetricsServiceResponse)
|
||||
}
|
||||
|
||||
type mathRandReader struct{}
|
||||
|
||||
func (mathRandReader) Read(p []byte) (n int, err error) {
|
||||
return mathrand.Read(p)
|
||||
}
|
||||
|
||||
var randReader mathRandReader
|
||||
|
||||
// Based on https://golang.org/src/crypto/tls/generate_cert.go,
|
||||
// simplified and weakened.
|
||||
func weakCertificate() (tls.Certificate, error) {
|
||||
priv, err := ecdsa.GenerateKey(elliptic.P256(), randReader)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(time.Hour)
|
||||
max := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
sn, err := cryptorand.Int(randReader, max)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
tmpl := x509.Certificate{
|
||||
SerialNumber: sn,
|
||||
Subject: pkix.Name{Organization: []string{"otel-go"}},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
DNSNames: []string{"localhost"},
|
||||
IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)},
|
||||
}
|
||||
derBytes, err := x509.CreateCertificate(randReader, &tmpl, &tmpl, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
var certBuf bytes.Buffer
|
||||
err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
var privBuf bytes.Buffer
|
||||
err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes})
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes())
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
)
|
||||
|
||||
func RunExporterShutdownTest(t *testing.T, factory func() otlpmetric.Client) {
|
||||
t.Run("testClientStopHonorsTimeout", func(t *testing.T) {
|
||||
testClientStopHonorsTimeout(t, factory())
|
||||
})
|
||||
|
||||
t.Run("testClientStopHonorsCancel", func(t *testing.T) {
|
||||
testClientStopHonorsCancel(t, factory())
|
||||
})
|
||||
|
||||
t.Run("testClientStopNoError", func(t *testing.T) {
|
||||
testClientStopNoError(t, factory())
|
||||
})
|
||||
|
||||
t.Run("testClientStopManyTimes", func(t *testing.T) {
|
||||
testClientStopManyTimes(t, factory())
|
||||
})
|
||||
}
|
||||
|
||||
func initializeExporter(t *testing.T, client otlpmetric.Client) *otlpmetric.Exporter {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
e, err := otlpmetric.New(ctx, client)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create exporter")
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func testClientStopHonorsTimeout(t *testing.T, client otlpmetric.Client) {
|
||||
t.Cleanup(func() {
|
||||
// The test is looking for a failed shut down. Call Stop a second time
|
||||
// with an un-expired context to give the client a second chance at
|
||||
// cleaning up. There is not guarantee from the Client interface this
|
||||
// will succeed, therefore, no need to check the error (just give it a
|
||||
// best try).
|
||||
_ = client.Stop(context.Background())
|
||||
})
|
||||
e := initializeExporter(t, client)
|
||||
|
||||
innerCtx, innerCancel := context.WithTimeout(context.Background(), time.Microsecond)
|
||||
<-innerCtx.Done()
|
||||
if err := e.Shutdown(innerCtx); err == nil {
|
||||
t.Error("expected context DeadlineExceeded error, got nil")
|
||||
} else if !errors.Is(err, context.DeadlineExceeded) {
|
||||
t.Errorf("expected context DeadlineExceeded error, got %v", err)
|
||||
}
|
||||
innerCancel()
|
||||
}
|
||||
|
||||
func testClientStopHonorsCancel(t *testing.T, client otlpmetric.Client) {
|
||||
t.Cleanup(func() {
|
||||
// The test is looking for a failed shut down. Call Stop a second time
|
||||
// with an un-expired context to give the client a second chance at
|
||||
// cleaning up. There is not guarantee from the Client interface this
|
||||
// will succeed, therefore, no need to check the error (just give it a
|
||||
// best try).
|
||||
_ = client.Stop(context.Background())
|
||||
})
|
||||
e := initializeExporter(t, client)
|
||||
|
||||
ctx, innerCancel := context.WithCancel(context.Background())
|
||||
innerCancel()
|
||||
if err := e.Shutdown(ctx); err == nil {
|
||||
t.Error("expected context canceled error, got nil")
|
||||
} else if !errors.Is(err, context.Canceled) {
|
||||
t.Errorf("expected context canceled error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testClientStopNoError(t *testing.T, client otlpmetric.Client) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
e := initializeExporter(t, client)
|
||||
if err := e.Shutdown(ctx); err != nil {
|
||||
t.Errorf("shutdown errored: expected nil, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func testClientStopManyTimes(t *testing.T, client otlpmetric.Client) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
|
||||
defer cancel()
|
||||
e := initializeExporter(t, client)
|
||||
|
||||
ch := make(chan struct{})
|
||||
wg := sync.WaitGroup{}
|
||||
const num int = 20
|
||||
wg.Add(num)
|
||||
errs := make([]error, num)
|
||||
for i := 0; i < num; i++ {
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
<-ch
|
||||
errs[idx] = e.Shutdown(ctx)
|
||||
}(i)
|
||||
}
|
||||
close(ch)
|
||||
wg.Wait()
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
t.Fatalf("failed to shutdown exporter: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
|
||||
import (
|
||||
collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// Collector is an interface that mock collectors should implements,
|
||||
// so they can be used for the end-to-end testing.
|
||||
type Collector interface {
|
||||
Stop() error
|
||||
GetMetrics() []*metricpb.Metric
|
||||
}
|
||||
|
||||
// MetricsStorage stores the metrics. Mock collectors could use it to
|
||||
// store metrics they have received.
|
||||
type MetricsStorage struct {
|
||||
metrics []*metricpb.Metric
|
||||
}
|
||||
|
||||
// NewMetricsStorage creates a new metrics storage.
|
||||
func NewMetricsStorage() MetricsStorage {
|
||||
return MetricsStorage{}
|
||||
}
|
||||
|
||||
// AddMetrics adds metrics to the metrics storage.
|
||||
func (s *MetricsStorage) AddMetrics(request *collectormetricpb.ExportMetricsServiceRequest) {
|
||||
for _, rm := range request.GetResourceMetrics() {
|
||||
// TODO (rghetia) handle multiple resource and library info.
|
||||
if len(rm.ScopeMetrics) > 0 {
|
||||
s.metrics = append(s.metrics, rm.ScopeMetrics[0].Metrics...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetrics returns the stored metrics.
|
||||
func (s *MetricsStorage) GetMetrics() []*metricpb.Metric {
|
||||
// copy in order to not change.
|
||||
m := make([]*metricpb.Metric, 0, len(s.metrics))
|
||||
return append(m, s.metrics...)
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metrictest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/processor/processortest"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
)
|
||||
|
||||
// OneRecordReader is a Reader that returns just one
|
||||
// filled record. It may be useful for testing driver's metrics
|
||||
// export.
|
||||
func OneRecordReader() export.InstrumentationLibraryReader {
|
||||
desc := metrictest.NewDescriptor(
|
||||
"foo",
|
||||
sdkapi.CounterInstrumentKind,
|
||||
number.Int64Kind,
|
||||
)
|
||||
agg := sum.New(1)
|
||||
if err := agg[0].Update(context.Background(), number.NewInt64Number(42), &desc); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
start := time.Date(2020, time.December, 8, 19, 15, 0, 0, time.UTC)
|
||||
end := time.Date(2020, time.December, 8, 19, 16, 0, 0, time.UTC)
|
||||
attrs := attribute.NewSet(attribute.String("abc", "def"), attribute.Int64("one", 1))
|
||||
rec := export.NewRecord(&desc, &attrs, agg[0].Aggregation(), start, end)
|
||||
|
||||
return processortest.MultiInstrumentationLibraryReader(
|
||||
map[instrumentation.Library][]export.Record{
|
||||
{
|
||||
Name: "onelib",
|
||||
}: {rec},
|
||||
})
|
||||
}
|
||||
|
||||
func EmptyReader() export.InstrumentationLibraryReader {
|
||||
return processortest.MultiInstrumentationLibraryReader(nil)
|
||||
}
|
||||
|
||||
// FailReader is a checkpointer that returns an error during
|
||||
// ForEach.
|
||||
type FailReader struct{}
|
||||
|
||||
var _ export.InstrumentationLibraryReader = FailReader{}
|
||||
|
||||
// ForEach implements export.Reader. It always fails.
|
||||
func (FailReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error {
|
||||
return fmt.Errorf("fail")
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// RunEndToEndTest can be used by protocol driver tests to validate
|
||||
// themselves.
|
||||
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) {
|
||||
selector := simple.NewWithHistogramDistribution()
|
||||
proc := processor.NewFactory(selector, aggregation.StatelessTemporalitySelector())
|
||||
cont := controller.New(proc, controller.WithExporter(exp))
|
||||
require.NoError(t, cont.Start(ctx))
|
||||
|
||||
meter := cont.Meter("test-meter")
|
||||
attrs := []attribute.KeyValue{attribute.Bool("test", true)}
|
||||
|
||||
type data struct {
|
||||
iKind sdkapi.InstrumentKind
|
||||
nKind number.Kind
|
||||
val int64
|
||||
}
|
||||
instruments := map[string]data{
|
||||
"test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1},
|
||||
"test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1},
|
||||
"test-int64-histogram": {sdkapi.HistogramInstrumentKind, number.Int64Kind, 2},
|
||||
"test-float64-histogram": {sdkapi.HistogramInstrumentKind, number.Float64Kind, 2},
|
||||
"test-int64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, 3},
|
||||
"test-float64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, 3},
|
||||
}
|
||||
for name, data := range instruments {
|
||||
data := data
|
||||
switch data.iKind {
|
||||
case sdkapi.CounterInstrumentKind:
|
||||
switch data.nKind {
|
||||
case number.Int64Kind:
|
||||
c, _ := meter.SyncInt64().Counter(name)
|
||||
c.Add(ctx, data.val, attrs...)
|
||||
case number.Float64Kind:
|
||||
c, _ := meter.SyncFloat64().Counter(name)
|
||||
c.Add(ctx, float64(data.val), attrs...)
|
||||
default:
|
||||
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
|
||||
}
|
||||
case sdkapi.HistogramInstrumentKind:
|
||||
switch data.nKind {
|
||||
case number.Int64Kind:
|
||||
c, _ := meter.SyncInt64().Histogram(name)
|
||||
c.Record(ctx, data.val, attrs...)
|
||||
case number.Float64Kind:
|
||||
c, _ := meter.SyncFloat64().Histogram(name)
|
||||
c.Record(ctx, float64(data.val), attrs...)
|
||||
default:
|
||||
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
|
||||
}
|
||||
case sdkapi.GaugeObserverInstrumentKind:
|
||||
switch data.nKind {
|
||||
case number.Int64Kind:
|
||||
g, _ := meter.AsyncInt64().Gauge(name)
|
||||
_ = meter.RegisterCallback([]instrument.Asynchronous{g}, func(ctx context.Context) {
|
||||
g.Observe(ctx, data.val, attrs...)
|
||||
})
|
||||
case number.Float64Kind:
|
||||
g, _ := meter.AsyncFloat64().Gauge(name)
|
||||
_ = meter.RegisterCallback([]instrument.Asynchronous{g}, func(ctx context.Context) {
|
||||
g.Observe(ctx, float64(data.val), attrs...)
|
||||
})
|
||||
default:
|
||||
assert.Failf(t, "unsupported number testing kind", data.nKind.String())
|
||||
}
|
||||
default:
|
||||
assert.Failf(t, "unsupported metrics testing kind", data.iKind.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Flush and close.
|
||||
require.NoError(t, cont.Stop(ctx))
|
||||
|
||||
// Wait >2 cycles.
|
||||
<-time.After(40 * time.Millisecond)
|
||||
|
||||
// Now shutdown the exporter
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
t.Fatalf("failed to stop the exporter: %v", err)
|
||||
}
|
||||
|
||||
// Shutdown the collector too so that we can begin
|
||||
// verification checks of expected data back.
|
||||
_ = mcMetrics.Stop()
|
||||
|
||||
metrics := mcMetrics.GetMetrics()
|
||||
assert.Len(t, metrics, len(instruments), "not enough metrics exported")
|
||||
seen := make(map[string]struct{}, len(instruments))
|
||||
for _, m := range metrics {
|
||||
data, ok := instruments[m.Name]
|
||||
if !ok {
|
||||
assert.Failf(t, "unknown metrics", m.Name)
|
||||
continue
|
||||
}
|
||||
seen[m.Name] = struct{}{}
|
||||
|
||||
switch data.iKind {
|
||||
case sdkapi.CounterInstrumentKind, sdkapi.GaugeObserverInstrumentKind:
|
||||
var dp []*metricpb.NumberDataPoint
|
||||
switch data.iKind {
|
||||
case sdkapi.CounterInstrumentKind:
|
||||
require.NotNil(t, m.GetSum())
|
||||
dp = m.GetSum().GetDataPoints()
|
||||
case sdkapi.GaugeObserverInstrumentKind:
|
||||
require.NotNil(t, m.GetGauge())
|
||||
dp = m.GetGauge().GetDataPoints()
|
||||
}
|
||||
if assert.Len(t, dp, 1) {
|
||||
switch data.nKind {
|
||||
case number.Int64Kind:
|
||||
v := &metricpb.NumberDataPoint_AsInt{AsInt: data.val}
|
||||
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
|
||||
case number.Float64Kind:
|
||||
v := &metricpb.NumberDataPoint_AsDouble{AsDouble: float64(data.val)}
|
||||
assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name)
|
||||
}
|
||||
}
|
||||
case sdkapi.HistogramInstrumentKind:
|
||||
require.NotNil(t, m.GetHistogram())
|
||||
if dp := m.GetHistogram().DataPoints; assert.Len(t, dp, 1) {
|
||||
count := dp[0].Count
|
||||
assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name)
|
||||
require.NotNil(t, dp[0].Sum)
|
||||
assert.Equal(t, float64(data.val*int64(count)), *dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val)
|
||||
}
|
||||
default:
|
||||
assert.Failf(t, "invalid metrics kind", data.iKind.String())
|
||||
}
|
||||
}
|
||||
|
||||
for i := range instruments {
|
||||
if _, ok := seen[i]; !ok {
|
||||
assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i))
|
||||
}
|
||||
}
|
||||
}
|
||||
155
exporters/otlp/otlpmetric/internal/transform/attribute.go
Normal file
155
exporters/otlp/otlpmetric/internal/transform/attribute.go
Normal file
@@ -0,0 +1,155 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
// AttrIter transforms an attribute iterator into OTLP key-values.
|
||||
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||
l := iter.Len()
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, l)
|
||||
for iter.Next() {
|
||||
out = append(out, KeyValue(iter.Attribute()))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||
for _, kv := range attrs {
|
||||
out = append(out, KeyValue(kv))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
|
||||
return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||
}
|
||||
|
||||
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||
func Value(v attribute.Value) *cpb.AnyValue {
|
||||
av := new(cpb.AnyValue)
|
||||
switch v.Type() {
|
||||
case attribute.BOOL:
|
||||
av.Value = &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v.AsBool(),
|
||||
}
|
||||
case attribute.BOOLSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: boolSliceValues(v.AsBoolSlice()),
|
||||
},
|
||||
}
|
||||
case attribute.INT64:
|
||||
av.Value = &cpb.AnyValue_IntValue{
|
||||
IntValue: v.AsInt64(),
|
||||
}
|
||||
case attribute.INT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: int64SliceValues(v.AsInt64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
av.Value = &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v.AsFloat64(),
|
||||
}
|
||||
case attribute.FLOAT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.STRING:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: v.AsString(),
|
||||
}
|
||||
case attribute.STRINGSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: stringSliceValues(v.AsStringSlice()),
|
||||
},
|
||||
}
|
||||
default:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
}
|
||||
}
|
||||
return av
|
||||
}
|
||||
|
||||
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_IntValue{
|
||||
IntValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{
|
||||
StringValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
197
exporters/otlp/otlpmetric/internal/transform/attribute_test.go
Normal file
197
exporters/otlp/otlpmetric/internal/transform/attribute_test.go
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
attrBool = attribute.Bool("bool", true)
|
||||
attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false})
|
||||
attrInt = attribute.Int("int", 1)
|
||||
attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1})
|
||||
attrInt64 = attribute.Int64("int64", 1)
|
||||
attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1})
|
||||
attrFloat64 = attribute.Float64("float64", 1)
|
||||
attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1})
|
||||
attrString = attribute.String("string", "o")
|
||||
attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"})
|
||||
attrInvalid = attribute.KeyValue{
|
||||
Key: attribute.Key("invalid"),
|
||||
Value: attribute.Value{},
|
||||
}
|
||||
|
||||
valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}}
|
||||
valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}}
|
||||
valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse},
|
||||
},
|
||||
}}
|
||||
valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}}
|
||||
valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}}
|
||||
valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: []*cpb.AnyValue{valIntNOne, valIntOne},
|
||||
},
|
||||
}}
|
||||
valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}}
|
||||
valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}}
|
||||
valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: []*cpb.AnyValue{valDblNOne, valDblOne},
|
||||
},
|
||||
}}
|
||||
valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}}
|
||||
valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}}
|
||||
valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: []*cpb.AnyValue{valStrO, valStrN},
|
||||
},
|
||||
}}
|
||||
|
||||
kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue}
|
||||
kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice}
|
||||
kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne}
|
||||
kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice}
|
||||
kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne}
|
||||
kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice}
|
||||
kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne}
|
||||
kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice}
|
||||
kvString = &cpb.KeyValue{Key: "string", Value: valStrO}
|
||||
kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice}
|
||||
kvInvalid = &cpb.KeyValue{
|
||||
Key: "invalid",
|
||||
Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
type attributeTest struct {
|
||||
name string
|
||||
in []attribute.KeyValue
|
||||
want []*cpb.KeyValue
|
||||
}
|
||||
|
||||
func TestAttributeTransforms(t *testing.T) {
|
||||
for _, test := range []attributeTest{
|
||||
{"nil", nil, nil},
|
||||
{"empty", []attribute.KeyValue{}, nil},
|
||||
{
|
||||
"invalid",
|
||||
[]attribute.KeyValue{attrInvalid},
|
||||
[]*cpb.KeyValue{kvInvalid},
|
||||
},
|
||||
{
|
||||
"bool",
|
||||
[]attribute.KeyValue{attrBool},
|
||||
[]*cpb.KeyValue{kvBool},
|
||||
},
|
||||
{
|
||||
"bool slice",
|
||||
[]attribute.KeyValue{attrBoolSlice},
|
||||
[]*cpb.KeyValue{kvBoolSlice},
|
||||
},
|
||||
{
|
||||
"int",
|
||||
[]attribute.KeyValue{attrInt},
|
||||
[]*cpb.KeyValue{kvInt},
|
||||
},
|
||||
{
|
||||
"int slice",
|
||||
[]attribute.KeyValue{attrIntSlice},
|
||||
[]*cpb.KeyValue{kvIntSlice},
|
||||
},
|
||||
{
|
||||
"int64",
|
||||
[]attribute.KeyValue{attrInt64},
|
||||
[]*cpb.KeyValue{kvInt64},
|
||||
},
|
||||
{
|
||||
"int64 slice",
|
||||
[]attribute.KeyValue{attrInt64Slice},
|
||||
[]*cpb.KeyValue{kvInt64Slice},
|
||||
},
|
||||
{
|
||||
"float64",
|
||||
[]attribute.KeyValue{attrFloat64},
|
||||
[]*cpb.KeyValue{kvFloat64},
|
||||
},
|
||||
{
|
||||
"float64 slice",
|
||||
[]attribute.KeyValue{attrFloat64Slice},
|
||||
[]*cpb.KeyValue{kvFloat64Slice},
|
||||
},
|
||||
{
|
||||
"string",
|
||||
[]attribute.KeyValue{attrString},
|
||||
[]*cpb.KeyValue{kvString},
|
||||
},
|
||||
{
|
||||
"string slice",
|
||||
[]attribute.KeyValue{attrStringSlice},
|
||||
[]*cpb.KeyValue{kvStringSlice},
|
||||
},
|
||||
{
|
||||
"all",
|
||||
[]attribute.KeyValue{
|
||||
attrBool,
|
||||
attrBoolSlice,
|
||||
attrInt,
|
||||
attrIntSlice,
|
||||
attrInt64,
|
||||
attrInt64Slice,
|
||||
attrFloat64,
|
||||
attrFloat64Slice,
|
||||
attrString,
|
||||
attrStringSlice,
|
||||
attrInvalid,
|
||||
},
|
||||
[]*cpb.KeyValue{
|
||||
kvBool,
|
||||
kvBoolSlice,
|
||||
kvInt,
|
||||
kvIntSlice,
|
||||
kvInt64,
|
||||
kvInt64Slice,
|
||||
kvFloat64,
|
||||
kvFloat64Slice,
|
||||
kvString,
|
||||
kvStringSlice,
|
||||
kvInvalid,
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
t.Run("KeyValues", func(t *testing.T) {
|
||||
assert.ElementsMatch(t, test.want, KeyValues(test.in))
|
||||
})
|
||||
t.Run("AttrIter", func(t *testing.T) {
|
||||
s := attribute.NewSet(test.in...)
|
||||
assert.ElementsMatch(t, test.want, AttrIter(s.Iter()))
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -12,14 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// Deprecated: will be removed soon.
|
||||
func AtomicFieldOffsets() map[string]uintptr {
|
||||
return map[string]uintptr{
|
||||
"record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value),
|
||||
"record.updateCount": unsafe.Offsetof(record{}.updateCount),
|
||||
}
|
||||
}
|
||||
// Package transform provides transformation functionality from the
|
||||
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
114
exporters/otlp/otlpmetric/internal/transform/error.go
Normal file
114
exporters/otlp/otlpmetric/internal/transform/error.go
Normal file
@@ -0,0 +1,114 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errUnknownAggregation = errors.New("unknown aggregation")
|
||||
errUnknownTemporality = errors.New("unknown temporality")
|
||||
)
|
||||
|
||||
type errMetric struct {
|
||||
m *mpb.Metric
|
||||
err error
|
||||
}
|
||||
|
||||
func (e errMetric) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
func (e errMetric) Error() string {
|
||||
format := "invalid metric (name: %q, description: %q, unit: %q): %s"
|
||||
return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
|
||||
}
|
||||
|
||||
func (e errMetric) Is(target error) bool {
|
||||
return errors.Is(e.err, target)
|
||||
}
|
||||
|
||||
// multiErr is used by the data-type transform functions to wrap multiple
|
||||
// errors into a single return value. The error message will show all errors
|
||||
// as a list and scope them by the datatype name that is returning them.
|
||||
type multiErr struct {
|
||||
datatype string
|
||||
errs []error
|
||||
}
|
||||
|
||||
// errOrNil returns nil if e contains no errors, otherwise it returns e.
|
||||
func (e *multiErr) errOrNil() error {
|
||||
if len(e.errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// append adds err to e. If err is a multiErr, its errs are flattened into e.
|
||||
func (e *multiErr) append(err error) {
|
||||
// Do not use errors.As here, this should only be flattened one layer. If
|
||||
// there is a *multiErr several steps down the chain, all the errors above
|
||||
// it will be discarded if errors.As is used instead.
|
||||
switch other := err.(type) {
|
||||
case *multiErr:
|
||||
// Flatten err errors into e.
|
||||
e.errs = append(e.errs, other.errs...)
|
||||
default:
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *multiErr) Error() string {
|
||||
es := make([]string, len(e.errs))
|
||||
for i, err := range e.errs {
|
||||
es[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
format := "%d errors occurred transforming %s:\n\t%s"
|
||||
return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
|
||||
}
|
||||
|
||||
func (e *multiErr) Unwrap() error {
|
||||
switch len(e.errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return e.errs[0]
|
||||
}
|
||||
|
||||
// Return a multiErr without the leading error.
|
||||
cp := &multiErr{
|
||||
datatype: e.datatype,
|
||||
errs: make([]error, len(e.errs)-1),
|
||||
}
|
||||
copy(cp.errs, e.errs[1:])
|
||||
return cp
|
||||
}
|
||||
|
||||
func (e *multiErr) Is(target error) bool {
|
||||
if len(e.errs) == 0 {
|
||||
return false
|
||||
}
|
||||
// Check if the first error is target.
|
||||
return errors.Is(e.errs[0], target)
|
||||
}
|
||||
91
exporters/otlp/otlpmetric/internal/transform/error_test.go
Normal file
91
exporters/otlp/otlpmetric/internal/transform/error_test.go
Normal file
@@ -0,0 +1,91 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation}
|
||||
e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality}
|
||||
)
|
||||
|
||||
type testingErr struct{}
|
||||
|
||||
func (testingErr) Error() string { return "testing error" }
|
||||
|
||||
// errFunc is a non-comparable error type.
|
||||
type errFunc func() string
|
||||
|
||||
func (e errFunc) Error() string {
|
||||
return e()
|
||||
}
|
||||
|
||||
func TestMultiErr(t *testing.T) {
|
||||
const name = "TestMultiErr"
|
||||
me := &multiErr{datatype: name}
|
||||
|
||||
t.Run("ErrOrNil", func(t *testing.T) {
|
||||
require.Nil(t, me.errOrNil())
|
||||
me.errs = []error{e0}
|
||||
assert.Error(t, me.errOrNil())
|
||||
})
|
||||
|
||||
var testErr testingErr
|
||||
t.Run("AppendError", func(t *testing.T) {
|
||||
me.append(testErr)
|
||||
assert.Equal(t, testErr, me.errs[len(me.errs)-1])
|
||||
})
|
||||
|
||||
t.Run("AppendFlattens", func(t *testing.T) {
|
||||
other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}}
|
||||
me.append(other)
|
||||
assert.Equal(t, e1, me.errs[len(me.errs)-1])
|
||||
})
|
||||
|
||||
t.Run("ErrorMessage", func(t *testing.T) {
|
||||
// Test the overall structure of the message, but not the exact
|
||||
// language so this doesn't become a change-indicator.
|
||||
msg := me.Error()
|
||||
lines := strings.Split(msg, "\n")
|
||||
assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg)
|
||||
assert.Contains(t, msg, name)
|
||||
assert.Contains(t, msg, e0.Error())
|
||||
assert.Contains(t, msg, testErr.Error())
|
||||
assert.Contains(t, msg, e1.Error())
|
||||
})
|
||||
|
||||
t.Run("ErrorIs", func(t *testing.T) {
|
||||
assert.ErrorIs(t, me, errUnknownAggregation)
|
||||
assert.ErrorIs(t, me, e0)
|
||||
assert.ErrorIs(t, me, testErr)
|
||||
assert.ErrorIs(t, me, errUnknownTemporality)
|
||||
assert.ErrorIs(t, me, e1)
|
||||
|
||||
errUnknown := errFunc(func() string { return "unknown error" })
|
||||
assert.NotErrorIs(t, me, errUnknown)
|
||||
|
||||
var empty multiErr
|
||||
assert.NotErrorIs(t, &empty, errUnknownTemporality)
|
||||
})
|
||||
}
|
||||
207
exporters/otlp/otlpmetric/internal/transform/metricdata.go
Normal file
207
exporters/otlp/otlpmetric/internal/transform/metricdata.go
Normal file
@@ -0,0 +1,207 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
)
|
||||
|
||||
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||
func ResourceMetrics(rm metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||
return &mpb.ResourceMetrics{
|
||||
Resource: &rpb.Resource{
|
||||
Attributes: AttrIter(rm.Resource.Iter()),
|
||||
},
|
||||
ScopeMetrics: sms,
|
||||
SchemaUrl: rm.Resource.SchemaURL(),
|
||||
}, err
|
||||
}
|
||||
|
||||
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||
// sms contains invalid metric values, an error will be returned along with a
|
||||
// slice that contains partial OTLP ScopeMetrics.
|
||||
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||
for _, sm := range sms {
|
||||
ms, err := Metrics(sm.Metrics)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
|
||||
out = append(out, &mpb.ScopeMetrics{
|
||||
Scope: &cpb.InstrumentationScope{
|
||||
Name: sm.Scope.Name,
|
||||
Version: sm.Scope.Version,
|
||||
},
|
||||
Metrics: ms,
|
||||
SchemaUrl: sm.Scope.SchemaURL,
|
||||
})
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||
// invalid metric values, an error will be returned along with a slice that
|
||||
// contains partial OTLP Metrics.
|
||||
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||
errs := &multiErr{datatype: "Metrics"}
|
||||
out := make([]*mpb.Metric, 0, len(ms))
|
||||
for _, m := range ms {
|
||||
o, err := metric(m)
|
||||
if err != nil {
|
||||
// Do not include invalid data. Drop the metric, report the error.
|
||||
errs.append(errMetric{m: o, err: err})
|
||||
continue
|
||||
}
|
||||
out = append(out, o)
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||
var err error
|
||||
out := &mpb.Metric{
|
||||
Name: m.Name,
|
||||
Description: m.Description,
|
||||
Unit: string(m.Unit),
|
||||
}
|
||||
switch a := m.Data.(type) {
|
||||
case metricdata.Gauge[int64]:
|
||||
out.Data = Gauge[int64](a)
|
||||
case metricdata.Gauge[float64]:
|
||||
out.Data = Gauge[float64](a)
|
||||
case metricdata.Sum[int64]:
|
||||
out.Data, err = Sum[int64](a)
|
||||
case metricdata.Sum[float64]:
|
||||
out.Data, err = Sum[float64](a)
|
||||
case metricdata.Histogram:
|
||||
out.Data, err = Histogram(a)
|
||||
default:
|
||||
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||
return &mpb.Metric_Gauge{
|
||||
Gauge: &mpb.Gauge{
|
||||
DataPoints: DataPoints(g.DataPoints),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Sum returns an OTLP Metric_Sum generated from s. An error is returned with
|
||||
// a partial Metric_Sum if the temporality of s is unknown.
|
||||
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||
t, err := Temporality(s.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Sum{
|
||||
Sum: &mpb.Sum{
|
||||
AggregationTemporality: t,
|
||||
IsMonotonic: s.IsMonotonic,
|
||||
DataPoints: DataPoints(s.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
ndp := &mpb.NumberDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
|
||||
TimeUnixNano: uint64(dPt.Time.UnixNano()),
|
||||
}
|
||||
switch v := any(dPt.Value).(type) {
|
||||
case int64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||
AsInt: v,
|
||||
}
|
||||
case float64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: v,
|
||||
}
|
||||
}
|
||||
out = append(out, ndp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||
// returned with a partial Metric_Histogram if the temporality of h is
|
||||
// unknown.
|
||||
func Histogram(h metricdata.Histogram) (*mpb.Metric_Histogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Histogram{
|
||||
Histogram: &mpb.Histogram{
|
||||
AggregationTemporality: t,
|
||||
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||
// from dPts.
|
||||
func HistogramDataPoints(dPts []metricdata.HistogramDataPoint) []*mpb.HistogramDataPoint {
|
||||
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
out = append(out, &mpb.HistogramDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
|
||||
TimeUnixNano: uint64(dPt.Time.UnixNano()),
|
||||
Count: dPt.Count,
|
||||
Sum: &dPt.Sum,
|
||||
BucketCounts: dPt.BucketCounts,
|
||||
ExplicitBounds: dPt.Bounds,
|
||||
Min: dPt.Min,
|
||||
Max: dPt.Max,
|
||||
})
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||
// is unknown, an error is returned along with the invalid
|
||||
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||
switch t {
|
||||
case metricdata.DeltaTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||
case metricdata.CumulativeTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||
default:
|
||||
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||
}
|
||||
}
|
||||
355
exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
Normal file
355
exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
Normal file
@@ -0,0 +1,355 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
)
|
||||
|
||||
type unknownAggT struct {
|
||||
metricdata.Aggregation
|
||||
}
|
||||
|
||||
var (
|
||||
// Sat Jan 01 2000 00:00:00 GMT+0000.
|
||||
start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
|
||||
end = start.Add(30 * time.Second)
|
||||
|
||||
alice = attribute.NewSet(attribute.String("user", "alice"))
|
||||
bob = attribute.NewSet(attribute.String("user", "bob"))
|
||||
|
||||
pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
|
||||
}}
|
||||
pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
|
||||
}}
|
||||
|
||||
min, max, sum = 2.0, 4.0, 90.0
|
||||
otelHDP = []metricdata.HistogramDataPoint{{
|
||||
Attributes: alice,
|
||||
StartTime: start,
|
||||
Time: end,
|
||||
Count: 30,
|
||||
Bounds: []float64{1, 5},
|
||||
BucketCounts: []uint64{0, 30, 0},
|
||||
Min: &min,
|
||||
Max: &max,
|
||||
Sum: sum,
|
||||
}}
|
||||
|
||||
pbHDP = []*mpb.HistogramDataPoint{{
|
||||
Attributes: []*cpb.KeyValue{pbAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Count: 30,
|
||||
Sum: &sum,
|
||||
ExplicitBounds: []float64{1, 5},
|
||||
BucketCounts: []uint64{0, 30, 0},
|
||||
Min: &min,
|
||||
Max: &max,
|
||||
}}
|
||||
|
||||
otelHist = metricdata.Histogram{
|
||||
Temporality: metricdata.DeltaTemporality,
|
||||
DataPoints: otelHDP,
|
||||
}
|
||||
invalidTemporality metricdata.Temporality
|
||||
otelHistInvalid = metricdata.Histogram{
|
||||
Temporality: invalidTemporality,
|
||||
DataPoints: otelHDP,
|
||||
}
|
||||
|
||||
pbHist = &mpb.Histogram{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||
DataPoints: pbHDP,
|
||||
}
|
||||
|
||||
otelDPtsInt64 = []metricdata.DataPoint[int64]{
|
||||
{Attributes: alice, StartTime: start, Time: end, Value: 1},
|
||||
{Attributes: bob, StartTime: start, Time: end, Value: 2},
|
||||
}
|
||||
otelDPtsFloat64 = []metricdata.DataPoint[float64]{
|
||||
{Attributes: alice, StartTime: start, Time: end, Value: 1.0},
|
||||
{Attributes: bob, StartTime: start, Time: end, Value: 2.0},
|
||||
}
|
||||
|
||||
pbDPtsInt64 = []*mpb.NumberDataPoint{
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{pbAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsInt{AsInt: 1},
|
||||
},
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{pbBob},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsInt{AsInt: 2},
|
||||
},
|
||||
}
|
||||
pbDPtsFloat64 = []*mpb.NumberDataPoint{
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{pbAlice},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
|
||||
},
|
||||
{
|
||||
Attributes: []*cpb.KeyValue{pbBob},
|
||||
StartTimeUnixNano: uint64(start.UnixNano()),
|
||||
TimeUnixNano: uint64(end.UnixNano()),
|
||||
Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
|
||||
},
|
||||
}
|
||||
|
||||
otelSumInt64 = metricdata.Sum[int64]{
|
||||
Temporality: metricdata.CumulativeTemporality,
|
||||
IsMonotonic: true,
|
||||
DataPoints: otelDPtsInt64,
|
||||
}
|
||||
otelSumFloat64 = metricdata.Sum[float64]{
|
||||
Temporality: metricdata.DeltaTemporality,
|
||||
IsMonotonic: false,
|
||||
DataPoints: otelDPtsFloat64,
|
||||
}
|
||||
otelSumInvalid = metricdata.Sum[float64]{
|
||||
Temporality: invalidTemporality,
|
||||
IsMonotonic: false,
|
||||
DataPoints: otelDPtsFloat64,
|
||||
}
|
||||
|
||||
pbSumInt64 = &mpb.Sum{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
|
||||
IsMonotonic: true,
|
||||
DataPoints: pbDPtsInt64,
|
||||
}
|
||||
pbSumFloat64 = &mpb.Sum{
|
||||
AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
|
||||
IsMonotonic: false,
|
||||
DataPoints: pbDPtsFloat64,
|
||||
}
|
||||
|
||||
otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64}
|
||||
otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64}
|
||||
|
||||
pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64}
|
||||
pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64}
|
||||
|
||||
unknownAgg unknownAggT
|
||||
otelMetrics = []metricdata.Metrics{
|
||||
{
|
||||
Name: "int64-gauge",
|
||||
Description: "Gauge with int64 values",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelGaugeInt64,
|
||||
},
|
||||
{
|
||||
Name: "float64-gauge",
|
||||
Description: "Gauge with float64 values",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelGaugeFloat64,
|
||||
},
|
||||
{
|
||||
Name: "int64-sum",
|
||||
Description: "Sum with int64 values",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelSumInt64,
|
||||
},
|
||||
{
|
||||
Name: "float64-sum",
|
||||
Description: "Sum with float64 values",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelSumFloat64,
|
||||
},
|
||||
{
|
||||
Name: "invalid-sum",
|
||||
Description: "Sum with invalid temporality",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelSumInvalid,
|
||||
},
|
||||
{
|
||||
Name: "histogram",
|
||||
Description: "Histogram",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelHist,
|
||||
},
|
||||
{
|
||||
Name: "invalid-histogram",
|
||||
Description: "Invalid histogram",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: otelHistInvalid,
|
||||
},
|
||||
{
|
||||
Name: "unknown",
|
||||
Description: "Unknown aggregation",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: unknownAgg,
|
||||
},
|
||||
}
|
||||
|
||||
pbMetrics = []*mpb.Metric{
|
||||
{
|
||||
Name: "int64-gauge",
|
||||
Description: "Gauge with int64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64},
|
||||
},
|
||||
{
|
||||
Name: "float64-gauge",
|
||||
Description: "Gauge with float64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64},
|
||||
},
|
||||
{
|
||||
Name: "int64-sum",
|
||||
Description: "Sum with int64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Sum{Sum: pbSumInt64},
|
||||
},
|
||||
{
|
||||
Name: "float64-sum",
|
||||
Description: "Sum with float64 values",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Sum{Sum: pbSumFloat64},
|
||||
},
|
||||
{
|
||||
Name: "histogram",
|
||||
Description: "Histogram",
|
||||
Unit: string(unit.Dimensionless),
|
||||
Data: &mpb.Metric_Histogram{Histogram: pbHist},
|
||||
},
|
||||
}
|
||||
|
||||
otelScopeMetrics = []metricdata.ScopeMetrics{{
|
||||
Scope: instrumentation.Scope{
|
||||
Name: "test/code/path",
|
||||
Version: "v0.1.0",
|
||||
SchemaURL: semconv.SchemaURL,
|
||||
},
|
||||
Metrics: otelMetrics,
|
||||
}}
|
||||
|
||||
pbScopeMetrics = []*mpb.ScopeMetrics{{
|
||||
Scope: &cpb.InstrumentationScope{
|
||||
Name: "test/code/path",
|
||||
Version: "v0.1.0",
|
||||
},
|
||||
Metrics: pbMetrics,
|
||||
SchemaUrl: semconv.SchemaURL,
|
||||
}}
|
||||
|
||||
otelRes = resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String("test server"),
|
||||
semconv.ServiceVersionKey.String("v0.1.0"),
|
||||
)
|
||||
|
||||
pbRes = &rpb.Resource{
|
||||
Attributes: []*cpb.KeyValue{
|
||||
{
|
||||
Key: "service.name",
|
||||
Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
|
||||
},
|
||||
},
|
||||
{
|
||||
Key: "service.version",
|
||||
Value: &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
otelResourceMetrics = metricdata.ResourceMetrics{
|
||||
Resource: otelRes,
|
||||
ScopeMetrics: otelScopeMetrics,
|
||||
}
|
||||
|
||||
pbResourceMetrics = &mpb.ResourceMetrics{
|
||||
Resource: pbRes,
|
||||
ScopeMetrics: pbScopeMetrics,
|
||||
SchemaUrl: semconv.SchemaURL,
|
||||
}
|
||||
)
|
||||
|
||||
func TestTransformations(t *testing.T) {
|
||||
// Run tests from the "bottom-up" of the metricdata data-types and halt
|
||||
// when a failure occurs to ensure the clearest failure message (as
|
||||
// opposed to the opposite of testing from the top-down which will obscure
|
||||
// errors deep inside the structs).
|
||||
|
||||
// DataPoint types.
|
||||
assert.Equal(t, pbHDP, HistogramDataPoints(otelHDP))
|
||||
assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64))
|
||||
require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64))
|
||||
|
||||
// Aggregations.
|
||||
h, err := Histogram(otelHist)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
|
||||
h, err = Histogram(otelHistInvalid)
|
||||
assert.ErrorIs(t, err, errUnknownTemporality)
|
||||
assert.Nil(t, h)
|
||||
|
||||
s, err := Sum[int64](otelSumInt64)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s)
|
||||
s, err = Sum[float64](otelSumFloat64)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s)
|
||||
s, err = Sum[float64](otelSumInvalid)
|
||||
assert.ErrorIs(t, err, errUnknownTemporality)
|
||||
assert.Nil(t, s)
|
||||
|
||||
assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64))
|
||||
require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64))
|
||||
|
||||
// Metrics.
|
||||
m, err := Metrics(otelMetrics)
|
||||
assert.ErrorIs(t, err, errUnknownTemporality)
|
||||
assert.ErrorIs(t, err, errUnknownAggregation)
|
||||
require.Equal(t, pbMetrics, m)
|
||||
|
||||
// Scope Metrics.
|
||||
sm, err := ScopeMetrics(otelScopeMetrics)
|
||||
assert.ErrorIs(t, err, errUnknownTemporality)
|
||||
assert.ErrorIs(t, err, errUnknownAggregation)
|
||||
require.Equal(t, pbScopeMetrics, sm)
|
||||
|
||||
// Resource Metrics.
|
||||
rm, err := ResourceMetrics(otelResourceMetrics)
|
||||
assert.ErrorIs(t, err, errUnknownTemporality)
|
||||
assert.ErrorIs(t, err, errUnknownAggregation)
|
||||
require.Equal(t, pbResourceMetrics, rm)
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
|
||||
// Option are setting options passed to an Exporter on creation.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
type exporterOptionFunc func(config) config
|
||||
|
||||
func (fn exporterOptionFunc) apply(cfg config) config {
|
||||
return fn(cfg)
|
||||
}
|
||||
|
||||
type config struct {
|
||||
temporalitySelector aggregation.TemporalitySelector
|
||||
}
|
||||
|
||||
// WithMetricAggregationTemporalitySelector defines the aggregation.TemporalitySelector used
|
||||
// for selecting aggregation.Temporality (i.e., Cumulative vs. Delta
|
||||
// aggregation). If not specified otherwise, exporter will use a
|
||||
// cumulative temporality selector.
|
||||
func WithMetricAggregationTemporalitySelector(selector aggregation.TemporalitySelector) Option {
|
||||
return exporterOptionFunc(func(cfg config) config {
|
||||
cfg.temporalitySelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
@@ -12,12 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
@@ -28,54 +29,49 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
|
||||
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
|
||||
// endpoint using gRPC.
|
||||
//
|
||||
// If an already established gRPC ClientConn is not passed in options using
|
||||
// WithGRPCConn, a connection to the OTLP endpoint will be established based
|
||||
// on options. If a connection cannot be establishes in the lifetime of ctx,
|
||||
// an error will be returned.
|
||||
func New(ctx context.Context, options ...Option) (metric.Exporter, error) {
|
||||
c, err := newClient(ctx, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return otlpmetric.New(c), nil
|
||||
}
|
||||
|
||||
type client struct {
|
||||
endpoint string
|
||||
dialOpts []grpc.DialOption
|
||||
metadata metadata.MD
|
||||
exportTimeout time.Duration
|
||||
requestFunc retry.RequestFunc
|
||||
|
||||
// stopCtx is used as a parent context for all exports. Therefore, when it
|
||||
// is canceled with the stopFunc all exports are canceled.
|
||||
stopCtx context.Context
|
||||
// stopFunc cancels stopCtx, stopping any active exports.
|
||||
stopFunc context.CancelFunc
|
||||
|
||||
// ourConn keeps track of where conn was created: true if created here on
|
||||
// Start, or false if passed with an option. This is important on Shutdown
|
||||
// as the conn should only be closed if created here on start. Otherwise,
|
||||
// ourConn keeps track of where conn was created: true if created here in
|
||||
// NewClient, or false if passed with an option. This is important on
|
||||
// Shutdown as the conn should only be closed if we created it. Otherwise,
|
||||
// it is up to the processes that passed the conn to close it.
|
||||
ourConn bool
|
||||
conn *grpc.ClientConn
|
||||
mscMu sync.RWMutex
|
||||
msc colmetricpb.MetricsServiceClient
|
||||
}
|
||||
|
||||
// Compile time check *client implements otlpmetric.Client.
|
||||
var _ otlpmetric.Client = (*client)(nil)
|
||||
|
||||
// NewClient creates a new gRPC metric client.
|
||||
func NewClient(opts ...Option) otlpmetric.Client {
|
||||
return newClient(opts...)
|
||||
}
|
||||
|
||||
func newClient(opts ...Option) *client {
|
||||
cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
// newClient creates a new gRPC metric client.
|
||||
func newClient(ctx context.Context, options ...Option) (otlpmetric.Client, error) {
|
||||
cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
|
||||
|
||||
c := &client{
|
||||
endpoint: cfg.Metrics.Endpoint,
|
||||
exportTimeout: cfg.Metrics.Timeout,
|
||||
requestFunc: cfg.RetryConfig.RequestFunc(retryable),
|
||||
dialOpts: cfg.DialOptions,
|
||||
stopCtx: ctx,
|
||||
stopFunc: cancel,
|
||||
conn: cfg.GRPCConn,
|
||||
}
|
||||
|
||||
@@ -83,17 +79,12 @@ func newClient(opts ...Option) *client {
|
||||
c.metadata = metadata.New(cfg.Metrics.Headers)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Start establishes a gRPC connection to the collector.
|
||||
func (c *client) Start(ctx context.Context) error {
|
||||
if c.conn == nil {
|
||||
// If the caller did not provide a ClientConn when the client was
|
||||
// created, create one using the configuration they did provide.
|
||||
conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
|
||||
conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
// Keep track that we own the lifecycle of this conn and need to close
|
||||
// it on Shutdown.
|
||||
@@ -101,69 +92,30 @@ func (c *client) Start(ctx context.Context) error {
|
||||
c.conn = conn
|
||||
}
|
||||
|
||||
// The otlpmetric.Client interface states this method is called just once,
|
||||
// so no need to check if already started.
|
||||
c.mscMu.Lock()
|
||||
c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
|
||||
c.mscMu.Unlock()
|
||||
|
||||
return nil
|
||||
return c, nil
|
||||
}
|
||||
|
||||
var errAlreadyStopped = errors.New("the client is already stopped")
|
||||
// ForceFlush does nothing, the client holds no state.
|
||||
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
|
||||
|
||||
// Stop shuts down the client.
|
||||
// Shutdown shuts down the client, freeing all resource.
|
||||
//
|
||||
// Any active connections to a remote endpoint are closed if they were created
|
||||
// by the client. Any gRPC connection passed during creation using
|
||||
// WithGRPCConn will not be closed. It is the caller's responsibility to
|
||||
// handle cleanup of that resource.
|
||||
//
|
||||
// This method synchronizes with the UploadMetrics method of the client. It
|
||||
// will wait for any active calls to that method to complete unimpeded, or it
|
||||
// will cancel any active calls if ctx expires. If ctx expires, the context
|
||||
// error will be forwarded as the returned error. All client held resources
|
||||
// will still be released in this situation.
|
||||
//
|
||||
// If the client has already stopped, an error will be returned describing
|
||||
// this.
|
||||
func (c *client) Stop(ctx context.Context) error {
|
||||
// Acquire the c.mscMu lock within the ctx lifetime.
|
||||
acquired := make(chan struct{})
|
||||
go func() {
|
||||
c.mscMu.Lock()
|
||||
close(acquired)
|
||||
}()
|
||||
var err error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// The Stop timeout is reached. Kill any remaining exports to force
|
||||
// the clear of the lock and save the timeout error to return and
|
||||
// signal the shutdown timed out before cleanly stopping.
|
||||
c.stopFunc()
|
||||
err = ctx.Err()
|
||||
func (c *client) Shutdown(ctx context.Context) error {
|
||||
// The otlpmetric.Exporter synchronizes access to client methods and
|
||||
// ensures this is called only once. The only thing that needs to be done
|
||||
// here is to release any computational resources the client holds.
|
||||
|
||||
// To ensure the client is not left in a dirty state c.msc needs to be
|
||||
// set to nil. To avoid the race condition when doing this, ensure
|
||||
// that all the exports are killed (initiated by c.stopFunc).
|
||||
<-acquired
|
||||
case <-acquired:
|
||||
}
|
||||
// Hold the mscMu lock for the rest of the function to ensure no new
|
||||
// exports are started.
|
||||
defer c.mscMu.Unlock()
|
||||
|
||||
// The otlpmetric.Client interface states this method is called only
|
||||
// once, but there is no guarantee it is called after Start. Ensure the
|
||||
// client is started before doing anything and let the called know if they
|
||||
// made a mistake.
|
||||
if c.msc == nil {
|
||||
return errAlreadyStopped
|
||||
}
|
||||
|
||||
// Clear c.msc to signal the client is stopped.
|
||||
c.metadata = nil
|
||||
c.requestFunc = nil
|
||||
c.msc = nil
|
||||
|
||||
err := ctx.Err()
|
||||
if c.ourConn {
|
||||
closeErr := c.conn.Close()
|
||||
// A context timeout error takes precedence over this error.
|
||||
@@ -171,25 +123,24 @@ func (c *client) Stop(ctx context.Context) error {
|
||||
err = closeErr
|
||||
}
|
||||
}
|
||||
c.conn = nil
|
||||
return err
|
||||
}
|
||||
|
||||
var errShutdown = errors.New("the client is shutdown")
|
||||
|
||||
// UploadMetrics sends a batch of spans.
|
||||
// UploadMetrics sends protoMetrics to connected endpoint.
|
||||
//
|
||||
// Retryable errors from the server will be handled according to any
|
||||
// RetryConfig the client was created with.
|
||||
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||
// Hold a read lock to ensure a shut down initiated after this starts does
|
||||
// not abandon the export. This read lock acquire has less priority than a
|
||||
// write lock acquire (i.e. Stop), meaning if the client is shutting down
|
||||
// this will come after the shut down.
|
||||
c.mscMu.RLock()
|
||||
defer c.mscMu.RUnlock()
|
||||
// The otlpmetric.Exporter synchronizes access to client methods, and
|
||||
// ensures this is not called after the Exporter is shutdown. Only thing
|
||||
// to do here is send data.
|
||||
|
||||
if c.msc == nil {
|
||||
return errShutdown
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do not upload if the context is already expired.
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
ctx, cancel := c.exportContext(ctx)
|
||||
@@ -209,7 +160,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
|
||||
}
|
||||
|
||||
// exportContext returns a copy of parent with an appropriate deadline and
|
||||
// cancellation function.
|
||||
// cancellation function based on the clients configured export timeout.
|
||||
//
|
||||
// It is the callers responsibility to cancel the returned context once its
|
||||
// use is complete, via the parent or directly with the returned CancelFunc, to
|
||||
@@ -230,23 +181,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
|
||||
ctx = metadata.NewOutgoingContext(ctx, c.metadata)
|
||||
}
|
||||
|
||||
// Unify the client stopCtx with the parent.
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-c.stopCtx.Done():
|
||||
// Cancel the export as the shutdown has timed out.
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
// retryable returns if err identifies a request that can be retried and a
|
||||
// duration to wait for if an explicit throttle time is included in err.
|
||||
func retryable(err error) (bool, time.Duration) {
|
||||
//func retryable(err error) (bool, time.Duration) {
|
||||
s := status.Convert(err)
|
||||
switch s.Code() {
|
||||
case codes.Canceled,
|
||||
|
||||
@@ -12,320 +12,183 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc_test
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetricgrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
var (
|
||||
oneRecord = otlpmetrictest.OneRecordReader()
|
||||
|
||||
testResource = resource.Empty()
|
||||
)
|
||||
|
||||
func TestNewExporterEndToEnd(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
additionalOpts []otlpmetricgrpc.Option
|
||||
func TestThrottleDuration(t *testing.T) {
|
||||
c := codes.ResourceExhausted
|
||||
testcases := []struct {
|
||||
status *status.Status
|
||||
expected time.Duration
|
||||
}{
|
||||
{
|
||||
name: "StandardExporter",
|
||||
status: status.New(c, "NoRetryInfo"),
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "WithCompressor",
|
||||
additionalOpts: []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithCompressor(gzip.Name),
|
||||
},
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "SingleRetryInfo").WithDetails(
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(15 * time.Millisecond),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 15 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
name: "WithServiceConfig",
|
||||
additionalOpts: []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithServiceConfig("{}"),
|
||||
},
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "ErrorInfo").WithDetails(
|
||||
&errdetails.ErrorInfo{Reason: "no throttle detail"},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
name: "WithDialOptions",
|
||||
additionalOpts: []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithDialOption(grpc.WithBlock()),
|
||||
},
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "ErrorAndRetryInfo").WithDetails(
|
||||
&errdetails.ErrorInfo{Reason: "with throttle detail"},
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(13 * time.Minute),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 13 * time.Minute,
|
||||
},
|
||||
{
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "DoubleRetryInfo").WithDetails(
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(13 * time.Minute),
|
||||
},
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(15 * time.Minute),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 13 * time.Minute,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
newExporterEndToEndTest(t, test.additionalOpts)
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.status.Message(), func(t *testing.T) {
|
||||
require.Equal(t, tc.expected, throttleDelay(tc.status))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlpmetricgrpc.Option) *otlpmetric.Exporter {
|
||||
opts := []otlpmetricgrpc.Option{
|
||||
otlpmetricgrpc.WithInsecure(),
|
||||
otlpmetricgrpc.WithEndpoint(endpoint),
|
||||
otlpmetricgrpc.WithReconnectionPeriod(50 * time.Millisecond),
|
||||
func TestRetryable(t *testing.T) {
|
||||
retryableCodes := map[codes.Code]bool{
|
||||
codes.OK: false,
|
||||
codes.Canceled: true,
|
||||
codes.Unknown: false,
|
||||
codes.InvalidArgument: false,
|
||||
codes.DeadlineExceeded: true,
|
||||
codes.NotFound: false,
|
||||
codes.AlreadyExists: false,
|
||||
codes.PermissionDenied: false,
|
||||
codes.ResourceExhausted: true,
|
||||
codes.FailedPrecondition: false,
|
||||
codes.Aborted: true,
|
||||
codes.OutOfRange: true,
|
||||
codes.Unimplemented: false,
|
||||
codes.Internal: false,
|
||||
codes.Unavailable: true,
|
||||
codes.DataLoss: true,
|
||||
codes.Unauthenticated: false,
|
||||
}
|
||||
|
||||
opts = append(opts, additionalOpts...)
|
||||
client := otlpmetricgrpc.NewClient(opts...)
|
||||
exp, err := otlpmetric.New(ctx, client)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create a new collector exporter: %v", err)
|
||||
for c, want := range retryableCodes {
|
||||
got, _ := retryable(status.Error(c, ""))
|
||||
assert.Equalf(t, want, got, "evaluate(%s)", c)
|
||||
}
|
||||
return exp
|
||||
}
|
||||
|
||||
func newExporterEndToEndTest(t *testing.T, additionalOpts []otlpmetricgrpc.Option) {
|
||||
mc := runMockCollector(t)
|
||||
func TestClient(t *testing.T) {
|
||||
factory := func() (otlpmetric.Client, otest.Collector) {
|
||||
coll, err := otest.NewGRPCCollector("", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
ctx := context.Background()
|
||||
addr := coll.Addr().String()
|
||||
client, err := newClient(ctx, WithEndpoint(addr), WithInsecure())
|
||||
require.NoError(t, err)
|
||||
return client, coll
|
||||
}
|
||||
|
||||
<-time.After(5 * time.Millisecond)
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint, additionalOpts...)
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
otlpmetrictest.RunEndToEndTest(ctx, t, exp, mc)
|
||||
t.Run("Integration", otest.RunClientTests(factory))
|
||||
}
|
||||
|
||||
func TestExporterShutdown(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
defer func() {
|
||||
_ = mc.Stop()
|
||||
}()
|
||||
func TestConfig(t *testing.T) {
|
||||
factoryFunc := func(errCh <-chan error, o ...Option) (metric.Exporter, *otest.GRPCCollector) {
|
||||
coll, err := otest.NewGRPCCollector("", errCh)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-time.After(5 * time.Millisecond)
|
||||
ctx := context.Background()
|
||||
opts := append([]Option{
|
||||
WithEndpoint(coll.Addr().String()),
|
||||
WithInsecure(),
|
||||
}, o...)
|
||||
exp, err := New(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
return exp, coll
|
||||
}
|
||||
|
||||
otlpmetrictest.RunExporterShutdownTest(t, func() otlpmetric.Client {
|
||||
return otlpmetricgrpc.NewClient(
|
||||
otlpmetricgrpc.WithInsecure(),
|
||||
otlpmetricgrpc.WithEndpoint(mc.endpoint),
|
||||
otlpmetricgrpc.WithReconnectionPeriod(50*time.Millisecond),
|
||||
t.Run("WithHeaders", func(t *testing.T) {
|
||||
key := "my-custom-header"
|
||||
headers := map[string]string{key: "custom-value"}
|
||||
exp, coll := factoryFunc(nil, WithHeaders(headers))
|
||||
t.Cleanup(coll.Shutdown)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
// Ensure everything is flushed.
|
||||
require.NoError(t, exp.Shutdown(ctx))
|
||||
|
||||
got := coll.Headers()
|
||||
require.Contains(t, got, key)
|
||||
assert.Equal(t, got[key], []string{headers[key]})
|
||||
})
|
||||
|
||||
t.Run("WithTimeout", func(t *testing.T) {
|
||||
// Do not send on errCh so the Collector never responds to the client.
|
||||
errCh := make(chan error)
|
||||
t.Cleanup(func() { close(errCh) })
|
||||
exp, coll := factoryFunc(
|
||||
errCh,
|
||||
WithTimeout(time.Millisecond),
|
||||
WithRetry(RetryConfig{Enabled: false}),
|
||||
)
|
||||
t.Cleanup(coll.Shutdown)
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
err := exp.Export(ctx, metricdata.ResourceMetrics{})
|
||||
assert.ErrorContains(t, err, context.DeadlineExceeded.Error())
|
||||
})
|
||||
}
|
||||
|
||||
func TestNewExporterInvokeStartThenStopManyTimes(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint)
|
||||
defer func() {
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Invoke Start numerous times, should return errAlreadyStarted
|
||||
for i := 0; i < 10; i++ {
|
||||
if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") {
|
||||
t.Fatalf("#%d unexpected Start error: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
t.Fatalf("failed to Shutdown the exporter: %v", err)
|
||||
}
|
||||
// Invoke Shutdown numerous times
|
||||
for i := 0; i < 10; i++ {
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
t.Fatalf(`#%d got error (%v) expected none`, i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This test takes a long time to run: to skip it, run tests using: -short.
|
||||
func TestNewExporterCollectorOnBadConnection(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skipf("Skipping this long running test")
|
||||
}
|
||||
|
||||
ln, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to grab an available port: %v", err)
|
||||
}
|
||||
// Firstly close the "collector's" channel: optimistically this endpoint won't get reused ASAP
|
||||
// However, our goal of closing it is to simulate an unavailable connection
|
||||
_ = ln.Close()
|
||||
|
||||
_, collectorPortStr, _ := net.SplitHostPort(ln.Addr().String())
|
||||
|
||||
endpoint := fmt.Sprintf("localhost:%s", collectorPortStr)
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, endpoint)
|
||||
_ = exp.Shutdown(ctx)
|
||||
}
|
||||
|
||||
func TestNewExporterWithEndpoint(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint)
|
||||
_ = exp.Shutdown(ctx)
|
||||
}
|
||||
|
||||
func TestNewExporterWithHeaders(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint,
|
||||
otlpmetricgrpc.WithHeaders(map[string]string{"header1": "value1"}))
|
||||
require.NoError(t, exp.Export(ctx, testResource, oneRecord))
|
||||
|
||||
defer func() {
|
||||
_ = exp.Shutdown(ctx)
|
||||
}()
|
||||
|
||||
headers := mc.getHeaders()
|
||||
require.Len(t, headers.Get("header1"), 1)
|
||||
assert.Equal(t, "value1", headers.Get("header1")[0])
|
||||
}
|
||||
|
||||
func TestNewExporterWithTimeout(t *testing.T) {
|
||||
tts := []struct {
|
||||
name string
|
||||
fn func(exp *otlpmetric.Exporter) error
|
||||
timeout time.Duration
|
||||
metrics int
|
||||
spans int
|
||||
code codes.Code
|
||||
delay bool
|
||||
}{
|
||||
{
|
||||
name: "Timeout Metrics",
|
||||
fn: func(exp *otlpmetric.Exporter) error {
|
||||
return exp.Export(context.Background(), testResource, oneRecord)
|
||||
},
|
||||
timeout: time.Millisecond * 100,
|
||||
code: codes.DeadlineExceeded,
|
||||
delay: true,
|
||||
},
|
||||
|
||||
{
|
||||
name: "No Timeout Metrics",
|
||||
fn: func(exp *otlpmetric.Exporter) error {
|
||||
return exp.Export(context.Background(), testResource, oneRecord)
|
||||
},
|
||||
timeout: time.Minute,
|
||||
metrics: 1,
|
||||
code: codes.OK,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tts {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
if tt.delay {
|
||||
mc.metricSvc.delay = time.Second * 10
|
||||
}
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint, otlpmetricgrpc.WithTimeout(tt.timeout), otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetryConfig{Enabled: false}))
|
||||
defer func() {
|
||||
_ = exp.Shutdown(ctx)
|
||||
}()
|
||||
|
||||
err := tt.fn(exp)
|
||||
|
||||
if tt.code == codes.OK {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
s := status.Convert(err)
|
||||
require.Equal(t, tt.code, s.Code())
|
||||
|
||||
require.Len(t, mc.getMetrics(), tt.metrics)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStartErrorInvalidAddress(t *testing.T) {
|
||||
client := otlpmetricgrpc.NewClient(
|
||||
otlpmetricgrpc.WithInsecure(),
|
||||
// Validate the connection in Start (which should return the error).
|
||||
otlpmetricgrpc.WithDialOption(
|
||||
grpc.WithBlock(),
|
||||
grpc.FailOnNonTempDialError(true),
|
||||
),
|
||||
otlpmetricgrpc.WithEndpoint("invalid"),
|
||||
otlpmetricgrpc.WithReconnectionPeriod(time.Hour),
|
||||
)
|
||||
err := client.Start(context.Background())
|
||||
assert.EqualError(t, err, `connection error: desc = "transport: error while dialing: dial tcp: address invalid: missing port in address"`)
|
||||
}
|
||||
|
||||
func TestEmptyData(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
<-time.After(5 * time.Millisecond)
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint)
|
||||
defer func() {
|
||||
assert.NoError(t, exp.Shutdown(ctx))
|
||||
}()
|
||||
|
||||
assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyReader()))
|
||||
}
|
||||
|
||||
func TestFailedMetricTransform(t *testing.T) {
|
||||
mc := runMockCollector(t)
|
||||
|
||||
defer func() {
|
||||
_ = mc.stop()
|
||||
}()
|
||||
|
||||
<-time.After(5 * time.Millisecond)
|
||||
|
||||
ctx := context.Background()
|
||||
exp := newGRPCExporter(t, ctx, mc.endpoint)
|
||||
defer func() {
|
||||
assert.NoError(t, exp.Shutdown(ctx))
|
||||
}()
|
||||
|
||||
assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailReader{}))
|
||||
}
|
||||
|
||||
@@ -1,193 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/durationpb"
|
||||
)
|
||||
|
||||
func TestThrottleDuration(t *testing.T) {
|
||||
c := codes.ResourceExhausted
|
||||
testcases := []struct {
|
||||
status *status.Status
|
||||
expected time.Duration
|
||||
}{
|
||||
{
|
||||
status: status.New(c, "no retry info"),
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "single retry info").WithDetails(
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(15 * time.Millisecond),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 15 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "error info").WithDetails(
|
||||
&errdetails.ErrorInfo{Reason: "no throttle detail"},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 0,
|
||||
},
|
||||
{
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "error and retry info").WithDetails(
|
||||
&errdetails.ErrorInfo{Reason: "with throttle detail"},
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(13 * time.Minute),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 13 * time.Minute,
|
||||
},
|
||||
{
|
||||
status: func() *status.Status {
|
||||
s, err := status.New(c, "double retry info").WithDetails(
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(13 * time.Minute),
|
||||
},
|
||||
&errdetails.RetryInfo{
|
||||
RetryDelay: durationpb.New(15 * time.Minute),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
expected: 13 * time.Minute,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
t.Run(tc.status.Message(), func(t *testing.T) {
|
||||
require.Equal(t, tc.expected, throttleDelay(tc.status))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRetryable(t *testing.T) {
|
||||
retryableCodes := map[codes.Code]bool{
|
||||
codes.OK: false,
|
||||
codes.Canceled: true,
|
||||
codes.Unknown: false,
|
||||
codes.InvalidArgument: false,
|
||||
codes.DeadlineExceeded: true,
|
||||
codes.NotFound: false,
|
||||
codes.AlreadyExists: false,
|
||||
codes.PermissionDenied: false,
|
||||
codes.ResourceExhausted: true,
|
||||
codes.FailedPrecondition: false,
|
||||
codes.Aborted: true,
|
||||
codes.OutOfRange: true,
|
||||
codes.Unimplemented: false,
|
||||
codes.Internal: false,
|
||||
codes.Unavailable: true,
|
||||
codes.DataLoss: true,
|
||||
codes.Unauthenticated: false,
|
||||
}
|
||||
|
||||
for c, want := range retryableCodes {
|
||||
got, _ := retryable(status.Error(c, ""))
|
||||
assert.Equalf(t, want, got, "evaluate(%s)", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnstartedStop(t *testing.T) {
|
||||
client := NewClient()
|
||||
assert.ErrorIs(t, client.Stop(context.Background()), errAlreadyStopped)
|
||||
}
|
||||
|
||||
func TestUnstartedUploadMetric(t *testing.T) {
|
||||
client := NewClient()
|
||||
assert.ErrorIs(t, client.UploadMetrics(context.Background(), nil), errShutdown)
|
||||
}
|
||||
|
||||
func TestExportContextHonorsParentDeadline(t *testing.T) {
|
||||
now := time.Now()
|
||||
ctx, cancel := context.WithDeadline(context.Background(), now)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
// Without a client timeout, the parent deadline should be used.
|
||||
client := newClient(WithTimeout(0))
|
||||
eCtx, eCancel := client.exportContext(ctx)
|
||||
t.Cleanup(eCancel)
|
||||
|
||||
deadline, ok := eCtx.Deadline()
|
||||
assert.True(t, ok, "deadline not propagated to child context")
|
||||
assert.Equal(t, now, deadline)
|
||||
}
|
||||
|
||||
func TestExportContextHonorsClientTimeout(t *testing.T) {
|
||||
// Setting a timeout should ensure a deadline is set on the context.
|
||||
client := newClient(WithTimeout(1 * time.Second))
|
||||
ctx, cancel := client.exportContext(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
_, ok := ctx.Deadline()
|
||||
assert.True(t, ok, "timeout not set as deadline for child context")
|
||||
}
|
||||
|
||||
func TestExportContextLinksStopSignal(t *testing.T) {
|
||||
rootCtx := context.Background()
|
||||
|
||||
client := newClient(WithInsecure())
|
||||
t.Cleanup(func() { require.NoError(t, client.Stop(rootCtx)) })
|
||||
require.NoError(t, client.Start(rootCtx))
|
||||
|
||||
ctx, cancel := client.exportContext(rootCtx)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
require.False(t, func() bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}(), "context should not be done prior to canceling it")
|
||||
|
||||
// The client.stopFunc cancels the client.stopCtx. This should have been
|
||||
// setup as a parent of ctx. Therefore, it should cancel ctx as well.
|
||||
client.stopFunc()
|
||||
|
||||
// Assert this with Eventually to account for goroutine scheduler timing.
|
||||
assert.Eventually(t, func() bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
default:
|
||||
}
|
||||
return false
|
||||
}, 10*time.Second, time.Microsecond)
|
||||
}
|
||||
241
exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
Normal file
241
exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
Normal file
@@ -0,0 +1,241 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
)
|
||||
|
||||
// Option applies a configuration option to the Exporter.
|
||||
type Option interface {
|
||||
applyGRPCOption(oconf.Config) oconf.Config
|
||||
}
|
||||
|
||||
func asGRPCOptions(opts []Option) []oconf.GRPCOption {
|
||||
converted := make([]oconf.GRPCOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying the export of metric data
|
||||
// that failed.
|
||||
//
|
||||
// This configuration does not define any network retry strategy. That is
|
||||
// entirely handled by the gRPC ClientConn.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
oconf.GRPCOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
|
||||
return w.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the Exporter's gRPC
|
||||
// connection, just like grpc.WithInsecure()
|
||||
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used to determine client security. If the endpoint has a
|
||||
// scheme of "http" or "unix" client security will be disabled. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, client security will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{oconf.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithEndpoint sets the target endpoint the Exporter will connect to.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, "localhost:4317" will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{oconf.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||
// attempts to the target endpoint.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.ReconnectionPeriod = rp
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
func compressorToCompression(compressor string) oconf.Compression {
|
||||
if compressor == "gzip" {
|
||||
return oconf.GzipCompression
|
||||
}
|
||||
|
||||
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||
return oconf.NoCompression
|
||||
}
|
||||
|
||||
// WithCompressor sets the compressor the gRPC client uses.
|
||||
//
|
||||
// It is the responsibility of the caller to ensure that the compressor set
|
||||
// has been registered with google.golang.org/grpc/encoding (see
|
||||
// encoding.RegisterCompressor for more information). For example, to register
|
||||
// the gzip compressor import the package:
|
||||
//
|
||||
// import _ "google.golang.org/grpc/encoding/gzip"
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. That value can
|
||||
// be either "none" or "gzip". If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no compressor will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithCompressor(compressor string) Option {
|
||||
return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers with each gRPC requests.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as a list of key value pairs.
|
||||
// These pairs are expected to be in the W3C Correlation-Context format
|
||||
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
|
||||
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no user headers will be set.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{oconf.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTLSCredentials sets the gRPC connection to use creds.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. The value will
|
||||
// be parsed the filepath of the TLS certificate chain to use. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no TLS credentials will be used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithServiceConfig defines the default gRPC service config used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithServiceConfig(serviceConfig string) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.ServiceConfig = serviceConfig
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithDialOption sets explicit grpc.DialOptions to use when establishing a
|
||||
// gRPC connection. The options here are appended to the internal grpc.DialOptions
|
||||
// used so they will take precedence over any other internal grpc.DialOptions
|
||||
// they might conflict with.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.DialOptions = opts
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||
//
|
||||
// This option takes precedence over any other option that relates to
|
||||
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||
// other option of those types passed will be ignored.
|
||||
//
|
||||
// It is the callers responsibility to close the passed conn. The Exporter
|
||||
// Shutdown method will not close this connection.
|
||||
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||
return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
|
||||
cfg.GRPCConn = conn
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithTimeout sets the max amount of time an Exporter will attempt an export.
|
||||
//
|
||||
// This takes precedence over any retry settings defined by WithRetry. Once
|
||||
// this time limit has been reached the export is abandoned and the metric
|
||||
// data is dropped.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as an integer representing the
|
||||
// timeout in milliseconds. If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, a timeout of 10 seconds will be used.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{oconf.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry sets the retry policy for transient retryable errors that are
|
||||
// returned by the target endpoint.
|
||||
//
|
||||
// If the target endpoint responds with not only a retryable error, but
|
||||
// explicitly returns a backoff time in the response, that time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// These settings do not define any network retry strategy. That is entirely
|
||||
// handled by the gRPC ClientConn.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
// after each error for no more than a total time of 1 minute.
|
||||
func WithRetry(settings RetryConfig) Option {
|
||||
return wrappedOption{oconf.WithRetry(retry.Config(settings))}
|
||||
}
|
||||
@@ -12,20 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates
|
||||
// with an OTLP receiving endpoint using gRPC.
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
)
|
||||
|
||||
// New constructs a new Exporter and starts it.
|
||||
func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) {
|
||||
return otlpmetric.New(ctx, NewClient(opts...))
|
||||
}
|
||||
|
||||
// NewUnstarted constructs a new Exporter and does not start it.
|
||||
func NewUnstarted(opts ...Option) *otlpmetric.Exporter {
|
||||
return otlpmetric.NewUnstarted(NewClient(opts...))
|
||||
}
|
||||
@@ -12,192 +12,34 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetricgrpc_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
func Example_insecure() {
|
||||
func Example() {
|
||||
ctx := context.Background()
|
||||
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithInsecure())
|
||||
exp, err := otlpmetric.New(ctx, client)
|
||||
exp, err := otlpmetricgrpc.New(ctx)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create the collector exporter: %v", err)
|
||||
panic(err)
|
||||
}
|
||||
|
||||
meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
if err := meterProvider.Shutdown(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
global.SetMeterProvider(meterProvider)
|
||||
|
||||
pusher := controller.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithHistogramDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controller: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
// pushes any last exports to the receiver
|
||||
if err := pusher.Stop(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}()
|
||||
|
||||
meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test")
|
||||
|
||||
// Recorder metric example
|
||||
|
||||
counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app"))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create the instrument: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
log.Printf("Doing really hard work (%d / 10)\n", i+1)
|
||||
counter.Add(ctx, 1.0)
|
||||
}
|
||||
}
|
||||
|
||||
func Example_withTLS() {
|
||||
// Please take at look at https://pkg.go.dev/google.golang.org/grpc/credentials#TransportCredentials
|
||||
// for ways on how to initialize gRPC TransportCredentials.
|
||||
creds, err := credentials.NewClientTLSFromFile("my-cert.pem", "")
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create gRPC client TLS credentials: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithTLSCredentials(creds))
|
||||
exp, err := otlpmetric.New(ctx, client)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create the collector exporter: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}()
|
||||
|
||||
pusher := controller.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithHistogramDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controller: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
// pushes any last exports to the receiver
|
||||
if err := pusher.Stop(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}()
|
||||
|
||||
meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test")
|
||||
|
||||
// Recorder metric example
|
||||
counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app"))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create the instrument: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
log.Printf("Doing really hard work (%d / 10)\n", i+1)
|
||||
counter.Add(ctx, 1.0)
|
||||
}
|
||||
}
|
||||
|
||||
func Example_withDifferentSignalCollectors() {
|
||||
client := otlpmetricgrpc.NewClient(
|
||||
otlpmetricgrpc.WithInsecure(),
|
||||
otlpmetricgrpc.WithEndpoint("localhost:30080"),
|
||||
)
|
||||
ctx := context.Background()
|
||||
exp, err := otlpmetric.New(ctx, client)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create the collector exporter: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
if err := exp.Shutdown(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}()
|
||||
|
||||
pusher := controller.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithHistogramDistribution(),
|
||||
exp,
|
||||
),
|
||||
controller.WithExporter(exp),
|
||||
controller.WithCollectPeriod(2*time.Second),
|
||||
)
|
||||
|
||||
global.SetMeterProvider(pusher)
|
||||
|
||||
if err := pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("could not start metric controller: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
// pushes any last exports to the receiver
|
||||
if err := pusher.Stop(ctx); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}()
|
||||
|
||||
meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test")
|
||||
|
||||
// Recorder metric example
|
||||
counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app"))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create the instrument: %v", err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
log.Printf("Doing really hard work (%d / 10)\n", i+1)
|
||||
counter.Add(ctx, 1.0)
|
||||
}
|
||||
|
||||
log.Printf("Done!")
|
||||
// From here, the meterProvider can be used by instrumentation to collect
|
||||
// telemetry.
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.7.1
|
||||
@@ -8,7 +8,6 @@ require (
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
go.opentelemetry.io/proto/otlp v0.19.0
|
||||
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1
|
||||
@@ -22,8 +21,10 @@ require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
|
||||
|
||||
@@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -114,6 +113,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
|
||||
@@ -1,169 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
func makeMockCollector(t *testing.T, mockConfig *mockConfig) *mockCollector {
|
||||
return &mockCollector{
|
||||
t: t,
|
||||
metricSvc: &mockMetricService{
|
||||
storage: otlpmetrictest.NewMetricsStorage(),
|
||||
errors: mockConfig.errors,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type mockMetricService struct {
|
||||
collectormetricpb.UnimplementedMetricsServiceServer
|
||||
|
||||
requests int
|
||||
errors []error
|
||||
|
||||
headers metadata.MD
|
||||
mu sync.RWMutex
|
||||
storage otlpmetrictest.MetricsStorage
|
||||
delay time.Duration
|
||||
}
|
||||
|
||||
func (mms *mockMetricService) getHeaders() metadata.MD {
|
||||
mms.mu.RLock()
|
||||
defer mms.mu.RUnlock()
|
||||
return mms.headers
|
||||
}
|
||||
|
||||
func (mms *mockMetricService) getMetrics() []*metricpb.Metric {
|
||||
mms.mu.RLock()
|
||||
defer mms.mu.RUnlock()
|
||||
return mms.storage.GetMetrics()
|
||||
}
|
||||
|
||||
func (mms *mockMetricService) Export(ctx context.Context, exp *collectormetricpb.ExportMetricsServiceRequest) (*collectormetricpb.ExportMetricsServiceResponse, error) {
|
||||
if mms.delay > 0 {
|
||||
time.Sleep(mms.delay)
|
||||
}
|
||||
|
||||
mms.mu.Lock()
|
||||
defer func() {
|
||||
mms.requests++
|
||||
mms.mu.Unlock()
|
||||
}()
|
||||
|
||||
reply := &collectormetricpb.ExportMetricsServiceResponse{}
|
||||
if mms.requests < len(mms.errors) {
|
||||
idx := mms.requests
|
||||
return reply, mms.errors[idx]
|
||||
}
|
||||
|
||||
mms.headers, _ = metadata.FromIncomingContext(ctx)
|
||||
mms.storage.AddMetrics(exp)
|
||||
return reply, nil
|
||||
}
|
||||
|
||||
type mockCollector struct {
|
||||
t *testing.T
|
||||
|
||||
metricSvc *mockMetricService
|
||||
|
||||
endpoint string
|
||||
stopFunc func()
|
||||
stopOnce sync.Once
|
||||
}
|
||||
|
||||
type mockConfig struct {
|
||||
errors []error
|
||||
endpoint string
|
||||
}
|
||||
|
||||
var _ collectormetricpb.MetricsServiceServer = (*mockMetricService)(nil)
|
||||
|
||||
var errAlreadyStopped = fmt.Errorf("already stopped")
|
||||
|
||||
func (mc *mockCollector) stop() error {
|
||||
var err = errAlreadyStopped
|
||||
mc.stopOnce.Do(func() {
|
||||
err = nil
|
||||
if mc.stopFunc != nil {
|
||||
mc.stopFunc()
|
||||
}
|
||||
})
|
||||
// Give it sometime to shutdown.
|
||||
<-time.After(160 * time.Millisecond)
|
||||
|
||||
// Wait for services to finish reading/writing.
|
||||
// Getting the lock ensures the metricSvc is done flushing.
|
||||
mc.metricSvc.mu.Lock()
|
||||
defer mc.metricSvc.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (mc *mockCollector) Stop() error {
|
||||
return mc.stop()
|
||||
}
|
||||
|
||||
func (mc *mockCollector) getHeaders() metadata.MD {
|
||||
return mc.metricSvc.getHeaders()
|
||||
}
|
||||
|
||||
func (mc *mockCollector) getMetrics() []*metricpb.Metric {
|
||||
return mc.metricSvc.getMetrics()
|
||||
}
|
||||
|
||||
func (mc *mockCollector) GetMetrics() []*metricpb.Metric {
|
||||
return mc.getMetrics()
|
||||
}
|
||||
|
||||
// runMockCollector is a helper function to create a mock Collector.
|
||||
func runMockCollector(t *testing.T) *mockCollector {
|
||||
return runMockCollectorAtEndpoint(t, "localhost:0")
|
||||
}
|
||||
|
||||
func runMockCollectorAtEndpoint(t *testing.T, endpoint string) *mockCollector {
|
||||
return runMockCollectorWithConfig(t, &mockConfig{endpoint: endpoint})
|
||||
}
|
||||
|
||||
func runMockCollectorWithConfig(t *testing.T, mockConfig *mockConfig) *mockCollector {
|
||||
ln, err := net.Listen("tcp", mockConfig.endpoint)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get an endpoint: %v", err)
|
||||
}
|
||||
|
||||
srv := grpc.NewServer()
|
||||
mc := makeMockCollector(t, mockConfig)
|
||||
collectormetricpb.RegisterMetricsServiceServer(srv, mc.metricSvc)
|
||||
go func() {
|
||||
_ = srv.Serve(ln)
|
||||
}()
|
||||
|
||||
mc.endpoint = ln.Addr().String()
|
||||
// srv.Stop calls Close on mc.ln.
|
||||
mc.stopFunc = srv.Stop
|
||||
|
||||
return mc
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
)
|
||||
|
||||
// Option applies an option to the gRPC driver.
|
||||
type Option interface {
|
||||
applyGRPCOption(otlpconfig.Config) otlpconfig.Config
|
||||
}
|
||||
|
||||
func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
|
||||
converted := make([]otlpconfig.GRPCOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying export of span batches that
|
||||
// failed to be received by the target endpoint.
|
||||
//
|
||||
// This configuration does not define any network retry strategy. That is
|
||||
// entirely handled by the gRPC ClientConn.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
otlpconfig.GRPCOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
return w.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the exporter's gRPC
|
||||
// connection just like grpc.WithInsecure()
|
||||
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
|
||||
// default, client security is required unless WithInsecure is used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{otlpconfig.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithEndpoint sets the target endpoint the exporter will connect to. If
|
||||
// unset, localhost:4317 will be used as a default.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithReconnectionPeriod set the minimum amount of time between connection
|
||||
// attempts to the target endpoint.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithReconnectionPeriod(rp time.Duration) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.ReconnectionPeriod = rp
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
func compressorToCompression(compressor string) otlpconfig.Compression {
|
||||
if compressor == "gzip" {
|
||||
return otlpconfig.GzipCompression
|
||||
}
|
||||
|
||||
otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
|
||||
return otlpconfig.NoCompression
|
||||
}
|
||||
|
||||
// WithCompressor sets the compressor for the gRPC client to use when sending
|
||||
// requests. It is the responsibility of the caller to ensure that the
|
||||
// compressor set has been registered with google.golang.org/grpc/encoding.
|
||||
// This can be done by encoding.RegisterCompressor. Some compressors
|
||||
// auto-register on import, such as gzip, which can be registered by calling
|
||||
// `import _ "google.golang.org/grpc/encoding/gzip"`.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithCompressor(compressor string) Option {
|
||||
return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers with each gRPC requests.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{otlpconfig.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTLSCredentials allows the connection to use TLS credentials when
|
||||
// talking to the server. It takes in grpc.TransportCredentials instead of say
|
||||
// a Certificate file or a tls.Certificate, because the retrieving of these
|
||||
// credentials can be done in many ways e.g. plain file, in code tls.Config or
|
||||
// by certificate rotation, so it is up to the caller to decide what to use.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithServiceConfig defines the default gRPC service config used.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithServiceConfig(serviceConfig string) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.ServiceConfig = serviceConfig
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithDialOption sets explicit grpc.DialOptions to use when making a
|
||||
// connection. The options here are appended to the internal grpc.DialOptions
|
||||
// used so they will take precedence over any other internal grpc.DialOptions
|
||||
// they might conflict with.
|
||||
//
|
||||
// This option has no effect if WithGRPCConn is used.
|
||||
func WithDialOption(opts ...grpc.DialOption) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.DialOptions = opts
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
|
||||
//
|
||||
// This option takes precedence over any other option that relates to
|
||||
// establishing or persisting a gRPC connection to a target endpoint. Any
|
||||
// other option of those types passed will be ignored.
|
||||
//
|
||||
// It is the callers responsibility to close the passed conn. The client
|
||||
// Shutdown method will not close this connection.
|
||||
func WithGRPCConn(conn *grpc.ClientConn) Option {
|
||||
return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.GRPCConn = conn
|
||||
return cfg
|
||||
})}
|
||||
}
|
||||
|
||||
// WithTimeout sets the max amount of time a client will attempt to export a
|
||||
// batch of spans. This takes precedence over any retry settings defined with
|
||||
// WithRetry, once this time limit has been reached the export is abandoned
|
||||
// and the batch of spans is dropped.
|
||||
//
|
||||
// If unset, the default timeout will be set to 10 seconds.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{otlpconfig.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry sets the retry policy for transient retryable errors that may be
|
||||
// returned by the target endpoint when exporting a batch of spans.
|
||||
//
|
||||
// If the target endpoint responds with not only a retryable error, but
|
||||
// explicitly returns a backoff time in the response. That time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// These settings do not define any network retry strategy. That is entirely
|
||||
// handled by the gRPC ClientConn.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
// after each error for no more than a total time of 1 minute.
|
||||
func WithRetry(settings RetryConfig) Option {
|
||||
return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
|
||||
}
|
||||
@@ -1,92 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"math/big"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type mathRandReader struct{}
|
||||
|
||||
func (mathRandReader) Read(p []byte) (n int, err error) {
|
||||
return mathrand.Read(p)
|
||||
}
|
||||
|
||||
var randReader mathRandReader
|
||||
|
||||
type pemCertificate struct {
|
||||
Certificate []byte
|
||||
PrivateKey []byte
|
||||
}
|
||||
|
||||
// Based on https://golang.org/src/crypto/tls/generate_cert.go,
|
||||
// simplified and weakened.
|
||||
func generateWeakCertificate() (*pemCertificate, error) {
|
||||
priv, err := ecdsa.GenerateKey(elliptic.P256(), randReader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyUsage := x509.KeyUsageDigitalSignature
|
||||
notBefore := time.Now()
|
||||
notAfter := notBefore.Add(time.Hour)
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := cryptorand.Int(randReader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{
|
||||
Organization: []string{"otel-go"},
|
||||
},
|
||||
NotBefore: notBefore,
|
||||
NotAfter: notAfter,
|
||||
KeyUsage: keyUsage,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
DNSNames: []string{"localhost"},
|
||||
IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)},
|
||||
}
|
||||
derBytes, err := x509.CreateCertificate(randReader, &template, &template, &priv.PublicKey, priv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificateBuffer := new(bytes.Buffer)
|
||||
if err := pem.Encode(certificateBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privDERBytes, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privBuffer := new(bytes.Buffer)
|
||||
if err := pem.Encode(privBuffer, &pem.Block{Type: "PRIVATE KEY", Bytes: privDERBytes}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pemCertificate{
|
||||
Certificate: certificateBuffer.Bytes(),
|
||||
PrivateKey: privBuffer.Bytes(),
|
||||
}, nil
|
||||
}
|
||||
@@ -12,6 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
|
||||
import (
|
||||
@@ -31,24 +34,35 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
const contentTypeProto = "application/x-protobuf"
|
||||
// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
|
||||
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
|
||||
// endpoint using protobufs over HTTP.
|
||||
func New(_ context.Context, opts ...Option) (metric.Exporter, error) {
|
||||
c, err := newClient(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return otlpmetric.New(c), nil
|
||||
}
|
||||
|
||||
var gzPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
w := gzip.NewWriter(io.Discard)
|
||||
return w
|
||||
},
|
||||
type client struct {
|
||||
// req is cloned for every upload the client makes.
|
||||
req *http.Request
|
||||
compression Compression
|
||||
requestFunc retry.RequestFunc
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// Keep it in sync with golang's DefaultTransport from net/http! We
|
||||
// have our own copy to avoid handling a situation where the
|
||||
// DefaultTransport is overwritten with some different implementation
|
||||
// of http.RoundTripper or it's modified by other package.
|
||||
// of http.RoundTripper or it's modified by another package.
|
||||
var ourTransport = &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
@@ -62,19 +76,9 @@ var ourTransport = &http.Transport{
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
type client struct {
|
||||
name string
|
||||
cfg otlpconfig.SignalConfig
|
||||
generalCfg otlpconfig.Config
|
||||
requestFunc retry.RequestFunc
|
||||
client *http.Client
|
||||
stopCh chan struct{}
|
||||
stopOnce sync.Once
|
||||
}
|
||||
|
||||
// NewClient creates a new HTTP metric client.
|
||||
func NewClient(opts ...Option) otlpmetric.Client {
|
||||
cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...)
|
||||
// newClient creates a new HTTP metric client.
|
||||
func newClient(opts ...Option) (otlpmetric.Client, error) {
|
||||
cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
|
||||
|
||||
httpClient := &http.Client{
|
||||
Transport: ourTransport,
|
||||
@@ -86,68 +90,79 @@ func NewClient(opts ...Option) otlpmetric.Client {
|
||||
httpClient.Transport = transport
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
u := &url.URL{
|
||||
Scheme: "https",
|
||||
Host: cfg.Metrics.Endpoint,
|
||||
Path: cfg.Metrics.URLPath,
|
||||
}
|
||||
if cfg.Metrics.Insecure {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
// Body is set when this is cloned during upload.
|
||||
req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n := len(cfg.Metrics.Headers); n > 0 {
|
||||
for k, v := range cfg.Metrics.Headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-protobuf")
|
||||
|
||||
return &client{
|
||||
name: "metrics",
|
||||
cfg: cfg.Metrics,
|
||||
generalCfg: cfg,
|
||||
compression: Compression(cfg.Metrics.Compression),
|
||||
req: req,
|
||||
requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
|
||||
stopCh: stopCh,
|
||||
client: httpClient,
|
||||
}
|
||||
httpClient: httpClient,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start does nothing in a HTTP client.
|
||||
func (d *client) Start(ctx context.Context) error {
|
||||
// nothing to do
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
// ForceFlush does nothing, the client holds no state.
|
||||
func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
|
||||
|
||||
// Shutdown shuts down the client, freeing all resources.
|
||||
func (c *client) Shutdown(ctx context.Context) error {
|
||||
// The otlpmetric.Exporter synchronizes access to client methods and
|
||||
// ensures this is called only once. The only thing that needs to be done
|
||||
// here is to release any computational resources the client holds.
|
||||
|
||||
c.requestFunc = nil
|
||||
c.httpClient = nil
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
// Stop shuts down the client and interrupt any in-flight request.
|
||||
func (d *client) Stop(ctx context.Context) error {
|
||||
d.stopOnce.Do(func() {
|
||||
close(d.stopCh)
|
||||
})
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// UploadMetrics sends protoMetrics to the connected endpoint.
|
||||
//
|
||||
// Retryable errors from the server will be handled according to any
|
||||
// RetryConfig the client was created with.
|
||||
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||
// The otlpmetric.Exporter synchronizes access to client methods, and
|
||||
// ensures this is not called after the Exporter is shutdown. Only thing
|
||||
// to do here is send data.
|
||||
|
||||
// UploadMetrics sends a batch of metrics to the collector.
|
||||
func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
|
||||
pbRequest := &colmetricpb.ExportMetricsServiceRequest{
|
||||
ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
|
||||
}
|
||||
rawRequest, err := proto.Marshal(pbRequest)
|
||||
body, err := proto.Marshal(pbRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
request, err := c.newRequest(ctx, body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := d.contextWithStop(ctx)
|
||||
defer cancel()
|
||||
|
||||
request, err := d.newRequest(rawRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return d.requestFunc(ctx, func(ctx context.Context) error {
|
||||
return c.requestFunc(ctx, func(iCtx context.Context) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-iCtx.Done():
|
||||
return iCtx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
request.reset(ctx)
|
||||
resp, err := d.client.Do(request.Request)
|
||||
request.reset(iCtx)
|
||||
resp, err := c.httpClient.Do(request.Request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -167,7 +182,7 @@ func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
|
||||
return err
|
||||
}
|
||||
default:
|
||||
rErr = fmt.Errorf("failed to send %s to %s: %s", d.name, request.URL, resp.Status)
|
||||
rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status)
|
||||
}
|
||||
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
@@ -177,20 +192,18 @@ func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
|
||||
})
|
||||
}
|
||||
|
||||
func (d *client) newRequest(body []byte) (request, error) {
|
||||
u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
|
||||
r, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
||||
if err != nil {
|
||||
return request{Request: r}, err
|
||||
}
|
||||
|
||||
for k, v := range d.cfg.Headers {
|
||||
r.Header.Set(k, v)
|
||||
}
|
||||
r.Header.Set("Content-Type", contentTypeProto)
|
||||
var gzPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
w := gzip.NewWriter(io.Discard)
|
||||
return w
|
||||
},
|
||||
}
|
||||
|
||||
func (c *client) newRequest(ctx context.Context, body []byte) (request, error) {
|
||||
r := c.req.Clone(ctx)
|
||||
req := request{Request: r}
|
||||
switch Compression(d.cfg.Compression) {
|
||||
|
||||
switch c.compression {
|
||||
case NoCompression:
|
||||
r.ContentLength = (int64)(len(body))
|
||||
req.bodyReader = bodyReader(body)
|
||||
@@ -249,8 +262,8 @@ type retryableError struct {
|
||||
// throttle delay contained in headers.
|
||||
func newResponseError(header http.Header) error {
|
||||
var rErr retryableError
|
||||
if s, ok := header["Retry-After"]; ok {
|
||||
if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
|
||||
if v := header.Get("Retry-After"); v != "" {
|
||||
if t, err := strconv.ParseInt(v, 10, 64); err == nil {
|
||||
rErr.throttle = t
|
||||
}
|
||||
}
|
||||
@@ -275,26 +288,3 @@ func evaluate(err error) (bool, time.Duration) {
|
||||
|
||||
return true, time.Duration(rErr.throttle)
|
||||
}
|
||||
|
||||
func (d *client) getScheme() string {
|
||||
if d.cfg.Insecure {
|
||||
return "http"
|
||||
}
|
||||
return "https"
|
||||
}
|
||||
|
||||
func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
|
||||
// Unify the parent context Done signal with the client's stop
|
||||
// channel.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
go func(ctx context.Context, cancel context.CancelFunc) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Nothing to do, either cancelled or deadline
|
||||
// happened.
|
||||
case <-d.stopCh:
|
||||
cancel()
|
||||
}
|
||||
}(ctx, cancel)
|
||||
return ctx, cancel
|
||||
}
|
||||
|
||||
@@ -12,12 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp_test
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetrichttp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -25,247 +31,137 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
const (
|
||||
relOtherMetricsPath = "post/metrics/here"
|
||||
otherMetricsPath = "/post/metrics/here"
|
||||
)
|
||||
func TestClient(t *testing.T) {
|
||||
factory := func() (otlpmetric.Client, otest.Collector) {
|
||||
coll, err := otest.NewHTTPCollector("", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
var (
|
||||
oneRecord = otlpmetrictest.OneRecordReader()
|
||||
|
||||
testResource = resource.Empty()
|
||||
)
|
||||
|
||||
var (
|
||||
testHeaders = map[string]string{
|
||||
"Otel-Go-Key-1": "somevalue",
|
||||
"Otel-Go-Key-2": "someothervalue",
|
||||
}
|
||||
)
|
||||
|
||||
func TestEndToEnd(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
opts []otlpmetrichttp.Option
|
||||
mcCfg mockCollectorConfig
|
||||
tls bool
|
||||
}{
|
||||
{
|
||||
name: "no extra options",
|
||||
opts: nil,
|
||||
},
|
||||
{
|
||||
name: "with gzip compression",
|
||||
opts: []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with empty paths (forced to defaults)",
|
||||
opts: []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithURLPath(""),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with relative paths",
|
||||
opts: []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithURLPath(relOtherMetricsPath),
|
||||
},
|
||||
mcCfg: mockCollectorConfig{
|
||||
MetricsURLPath: otherMetricsPath,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with TLS",
|
||||
opts: nil,
|
||||
mcCfg: mockCollectorConfig{
|
||||
WithTLS: true,
|
||||
},
|
||||
tls: true,
|
||||
},
|
||||
{
|
||||
name: "with extra headers",
|
||||
opts: []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithHeaders(testHeaders),
|
||||
},
|
||||
mcCfg: mockCollectorConfig{
|
||||
ExpectedHeaders: testHeaders,
|
||||
},
|
||||
},
|
||||
addr := coll.Addr().String()
|
||||
client, err := newClient(WithEndpoint(addr), WithInsecure())
|
||||
require.NoError(t, err)
|
||||
return client, coll
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
mc := runMockCollector(t, tc.mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
allOpts := []otlpmetrichttp.Option{
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
}
|
||||
if tc.tls {
|
||||
tlsConfig := mc.ClientTLSConfig()
|
||||
require.NotNil(t, tlsConfig)
|
||||
allOpts = append(allOpts, otlpmetrichttp.WithTLSClientConfig(tlsConfig))
|
||||
} else {
|
||||
allOpts = append(allOpts, otlpmetrichttp.WithInsecure())
|
||||
}
|
||||
allOpts = append(allOpts, tc.opts...)
|
||||
client := otlpmetrichttp.NewClient(allOpts...)
|
||||
ctx := context.Background()
|
||||
exporter, err := otlpmetric.New(ctx, client)
|
||||
if assert.NoError(t, err) {
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(ctx))
|
||||
}()
|
||||
otlpmetrictest.RunEndToEndTest(ctx, t, exporter, mc)
|
||||
}
|
||||
})
|
||||
}
|
||||
t.Run("Integration", otest.RunClientTests(factory))
|
||||
}
|
||||
|
||||
func TestExporterShutdown(t *testing.T) {
|
||||
mc := runMockCollector(t, mockCollectorConfig{})
|
||||
defer func() {
|
||||
_ = mc.Stop()
|
||||
}()
|
||||
func TestConfig(t *testing.T) {
|
||||
factoryFunc := func(ePt string, errCh <-chan error, o ...Option) (metric.Exporter, *otest.HTTPCollector) {
|
||||
coll, err := otest.NewHTTPCollector(ePt, errCh)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-time.After(5 * time.Millisecond)
|
||||
opts := []Option{WithEndpoint(coll.Addr().String())}
|
||||
if !strings.HasPrefix(strings.ToLower(ePt), "https") {
|
||||
opts = append(opts, WithInsecure())
|
||||
}
|
||||
opts = append(opts, o...)
|
||||
|
||||
otlpmetrictest.RunExporterShutdownTest(t, func() otlpmetric.Client {
|
||||
return otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
otlpmetrichttp.WithEndpoint(mc.endpoint),
|
||||
ctx := context.Background()
|
||||
exp, err := New(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
return exp, coll
|
||||
}
|
||||
|
||||
t.Run("WithHeaders", func(t *testing.T) {
|
||||
key := http.CanonicalHeaderKey("my-custom-header")
|
||||
headers := map[string]string{key: "custom-value"}
|
||||
exp, coll := factoryFunc("", nil, WithHeaders(headers))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
require.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
// Ensure everything is flushed.
|
||||
require.NoError(t, exp.Shutdown(ctx))
|
||||
|
||||
got := coll.Headers()
|
||||
require.Contains(t, got, key)
|
||||
assert.Equal(t, got[key], []string{headers[key]})
|
||||
})
|
||||
|
||||
t.Run("WithTimeout", func(t *testing.T) {
|
||||
// Do not send on errCh so the Collector never responds to the client.
|
||||
errCh := make(chan error)
|
||||
exp, coll := factoryFunc(
|
||||
"",
|
||||
errCh,
|
||||
WithTimeout(time.Millisecond),
|
||||
WithRetry(RetryConfig{Enabled: false}),
|
||||
)
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
// Push this after Shutdown so the HTTP server doesn't hang.
|
||||
t.Cleanup(func() { close(errCh) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
err := exp.Export(ctx, metricdata.ResourceMetrics{})
|
||||
assert.ErrorContains(t, err, context.DeadlineExceeded.Error())
|
||||
})
|
||||
|
||||
t.Run("WithCompressionGZip", func(t *testing.T) {
|
||||
exp, coll := factoryFunc("", nil, WithCompression(GzipCompression))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
assert.Len(t, coll.Collect().Dump(), 1)
|
||||
})
|
||||
|
||||
t.Run("WithRetry", func(t *testing.T) {
|
||||
emptyErr := errors.New("")
|
||||
errCh := make(chan error, 3)
|
||||
header := http.Header{http.CanonicalHeaderKey("Retry-After"): {"10"}}
|
||||
// Both retryable errors.
|
||||
errCh <- &otest.HTTPResponseError{Status: http.StatusServiceUnavailable, Err: emptyErr, Header: header}
|
||||
errCh <- &otest.HTTPResponseError{Status: http.StatusTooManyRequests, Err: emptyErr}
|
||||
errCh <- nil
|
||||
exp, coll := factoryFunc("", errCh, WithRetry(RetryConfig{
|
||||
Enabled: true,
|
||||
InitialInterval: time.Nanosecond,
|
||||
MaxInterval: time.Millisecond,
|
||||
MaxElapsedTime: time.Minute,
|
||||
}))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
// Push this after Shutdown so the HTTP server doesn't hang.
|
||||
t.Cleanup(func() { close(errCh) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}), "failed retry")
|
||||
assert.Len(t, errCh, 0, "failed HTTP responses did not occur")
|
||||
})
|
||||
|
||||
t.Run("WithURLPath", func(t *testing.T) {
|
||||
path := "/prefix/v2/metrics"
|
||||
ePt := fmt.Sprintf("http://localhost:0%s", path)
|
||||
exp, coll := factoryFunc(ePt, nil, WithURLPath(path))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
assert.Len(t, coll.Collect().Dump(), 1)
|
||||
})
|
||||
|
||||
t.Run("WithURLPath", func(t *testing.T) {
|
||||
path := "/prefix/v2/metrics"
|
||||
ePt := fmt.Sprintf("http://localhost:0%s", path)
|
||||
exp, coll := factoryFunc(ePt, nil, WithURLPath(path))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
assert.Len(t, coll.Collect().Dump(), 1)
|
||||
})
|
||||
|
||||
t.Run("WithTLSClientConfig", func(t *testing.T) {
|
||||
ePt := "https://localhost:0"
|
||||
tlsCfg := &tls.Config{InsecureSkipVerify: true}
|
||||
exp, coll := factoryFunc(ePt, nil, WithTLSClientConfig(tlsCfg))
|
||||
ctx := context.Background()
|
||||
t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
|
||||
t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
|
||||
assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}))
|
||||
assert.Len(t, coll.Collect().Dump(), 1)
|
||||
})
|
||||
}
|
||||
|
||||
func TestTimeout(t *testing.T) {
|
||||
delay := make(chan struct{})
|
||||
mcCfg := mockCollectorConfig{Delay: delay}
|
||||
mc := runMockCollector(t, mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
defer func() { close(delay) }()
|
||||
client := otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
otlpmetrichttp.WithTimeout(time.Nanosecond),
|
||||
)
|
||||
ctx := context.Background()
|
||||
exporter, err := otlpmetric.New(ctx, client)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(ctx))
|
||||
}()
|
||||
err = exporter.Export(ctx, testResource, oneRecord)
|
||||
assert.Equalf(t, true, os.IsTimeout(err), "expected timeout error, got: %v", err)
|
||||
}
|
||||
|
||||
func TestEmptyData(t *testing.T) {
|
||||
mcCfg := mockCollectorConfig{}
|
||||
mc := runMockCollector(t, mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
driver := otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
)
|
||||
ctx := context.Background()
|
||||
exporter, err := otlpmetric.New(ctx, driver)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(ctx))
|
||||
}()
|
||||
assert.NoError(t, err)
|
||||
err = exporter.Export(ctx, testResource, oneRecord)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, mc.GetMetrics())
|
||||
}
|
||||
|
||||
func TestCancelledContext(t *testing.T) {
|
||||
statuses := []int{
|
||||
http.StatusBadRequest,
|
||||
}
|
||||
mcCfg := mockCollectorConfig{
|
||||
InjectHTTPStatus: statuses,
|
||||
}
|
||||
mc := runMockCollector(t, mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
driver := otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
exporter, err := otlpmetric.New(ctx, driver)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(context.Background()))
|
||||
}()
|
||||
cancel()
|
||||
_ = exporter.Export(ctx, testResource, oneRecord)
|
||||
assert.Empty(t, mc.GetMetrics())
|
||||
}
|
||||
|
||||
func TestDeadlineContext(t *testing.T) {
|
||||
statuses := make([]int, 0, 5)
|
||||
for i := 0; i < cap(statuses); i++ {
|
||||
statuses = append(statuses, http.StatusTooManyRequests)
|
||||
}
|
||||
mcCfg := mockCollectorConfig{
|
||||
InjectHTTPStatus: statuses,
|
||||
}
|
||||
mc := runMockCollector(t, mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
driver := otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
otlpmetrichttp.WithBackoff(time.Minute),
|
||||
)
|
||||
ctx := context.Background()
|
||||
exporter, err := otlpmetric.New(ctx, driver)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(context.Background()))
|
||||
}()
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
err = exporter.Export(ctx, testResource, oneRecord)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, mc.GetMetrics())
|
||||
}
|
||||
|
||||
func TestStopWhileExporting(t *testing.T) {
|
||||
statuses := make([]int, 0, 5)
|
||||
for i := 0; i < cap(statuses); i++ {
|
||||
statuses = append(statuses, http.StatusTooManyRequests)
|
||||
}
|
||||
mcCfg := mockCollectorConfig{
|
||||
InjectHTTPStatus: statuses,
|
||||
}
|
||||
mc := runMockCollector(t, mcCfg)
|
||||
defer mc.MustStop(t)
|
||||
driver := otlpmetrichttp.NewClient(
|
||||
otlpmetrichttp.WithEndpoint(mc.Endpoint()),
|
||||
otlpmetrichttp.WithInsecure(),
|
||||
otlpmetrichttp.WithBackoff(time.Minute),
|
||||
)
|
||||
ctx := context.Background()
|
||||
exporter, err := otlpmetric.New(ctx, driver)
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, exporter.Shutdown(ctx))
|
||||
}()
|
||||
doneCh := make(chan struct{})
|
||||
go func() {
|
||||
err := exporter.Export(ctx, testResource, oneRecord)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, mc.GetMetrics())
|
||||
close(doneCh)
|
||||
}()
|
||||
<-time.After(time.Second)
|
||||
err = exporter.Shutdown(ctx)
|
||||
assert.NoError(t, err)
|
||||
<-doneCh
|
||||
}
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUnreasonableBackoff(t *testing.T) {
|
||||
cIface := NewClient(
|
||||
WithEndpoint("http://localhost"),
|
||||
WithInsecure(),
|
||||
WithBackoff(-time.Microsecond),
|
||||
)
|
||||
require.IsType(t, &client{}, cIface)
|
||||
c := cIface.(*client)
|
||||
assert.True(t, c.generalCfg.RetryConfig.Enabled)
|
||||
assert.Equal(t, 5*time.Second, c.generalCfg.RetryConfig.InitialInterval)
|
||||
assert.Equal(t, 300*time.Millisecond, c.generalCfg.RetryConfig.MaxInterval)
|
||||
assert.Equal(t, time.Minute, c.generalCfg.RetryConfig.MaxElapsedTime)
|
||||
}
|
||||
|
||||
func TestUnreasonableMaxAttempts(t *testing.T) {
|
||||
type testcase struct {
|
||||
name string
|
||||
maxAttempts int
|
||||
}
|
||||
for _, tc := range []testcase{
|
||||
{
|
||||
name: "negative max attempts",
|
||||
maxAttempts: -3,
|
||||
},
|
||||
{
|
||||
name: "too large max attempts",
|
||||
maxAttempts: 10,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cIface := NewClient(
|
||||
WithEndpoint("http://localhost"),
|
||||
WithInsecure(),
|
||||
WithMaxAttempts(tc.maxAttempts),
|
||||
)
|
||||
require.IsType(t, &client{}, cIface)
|
||||
c := cIface.(*client)
|
||||
assert.True(t, c.generalCfg.RetryConfig.Enabled)
|
||||
assert.Equal(t, 5*time.Second, c.generalCfg.RetryConfig.InitialInterval)
|
||||
assert.Equal(t, 30*time.Second, c.generalCfg.RetryConfig.MaxInterval)
|
||||
assert.Equal(t, 145*time.Second, c.generalCfg.RetryConfig.MaxElapsedTime)
|
||||
})
|
||||
}
|
||||
}
|
||||
184
exporters/otlp/otlpmetric/otlpmetrichttp/config.go
Normal file
184
exporters/otlp/otlpmetric/otlpmetrichttp/config.go
Normal file
@@ -0,0 +1,184 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
|
||||
)
|
||||
|
||||
// Compression describes the compression used for payloads sent to the
|
||||
// collector.
|
||||
type Compression oconf.Compression
|
||||
|
||||
const (
|
||||
// NoCompression tells the driver to send payloads without
|
||||
// compression.
|
||||
NoCompression = Compression(oconf.NoCompression)
|
||||
// GzipCompression tells the driver to send payloads after
|
||||
// compressing them with gzip.
|
||||
GzipCompression = Compression(oconf.GzipCompression)
|
||||
)
|
||||
|
||||
// Option applies an option to the Exporter.
|
||||
type Option interface {
|
||||
applyHTTPOption(oconf.Config) oconf.Config
|
||||
}
|
||||
|
||||
func asHTTPOptions(opts []Option) []oconf.HTTPOption {
|
||||
converted := make([]oconf.HTTPOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = oconf.NewHTTPOption(o.applyHTTPOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying the export of metric data
|
||||
// that failed.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
oconf.HTTPOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config {
|
||||
return w.ApplyHTTPOption(cfg)
|
||||
}
|
||||
|
||||
// WithEndpoint sets the target endpoint the Exporter will connect to. This
|
||||
// endpoint is specified as a host and optional port, no path or scheme should
|
||||
// be included (see WithInsecure and WithURLPath).
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, "localhost:4318" will be used.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{oconf.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithCompression sets the compression strategy the Exporter will use to
|
||||
// compress the HTTP body.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. That value can
|
||||
// be either "none" or "gzip". If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no compression strategy will be used.
|
||||
func WithCompression(compression Compression) Option {
|
||||
return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
|
||||
}
|
||||
|
||||
// WithURLPath sets the URL path the Exporter will send requests to.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, the path
|
||||
// contained in that variable value will be used. If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, "/v1/metrics" will be used.
|
||||
func WithURLPath(urlPath string) Option {
|
||||
return wrappedOption{oconf.WithURLPath(urlPath)}
|
||||
}
|
||||
|
||||
// WithTLSClientConfig sets the TLS configuration the Exporter will use for
|
||||
// HTTP requests.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
|
||||
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
|
||||
// this option is not passed, that variable value will be used. The value will
|
||||
// be parsed the filepath of the TLS certificate chain to use. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, the system default configuration is used.
|
||||
func WithTLSClientConfig(tlsCfg *tls.Config) Option {
|
||||
return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the Exporter's HTTP
|
||||
// connection.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used to determine client security. If the endpoint has a
|
||||
// scheme of "http" or "unix" client security will be disabled. If both are
|
||||
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, client security will be used.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{oconf.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers with each HTTP requests.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as a list of key value pairs.
|
||||
// These pairs are expected to be in the W3C Correlation-Context format
|
||||
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
|
||||
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, no user headers will be set.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{oconf.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTimeout sets the max amount of time an Exporter will attempt an export.
|
||||
//
|
||||
// This takes precedence over any retry settings defined by WithRetry. Once
|
||||
// this time limit has been reached the export is abandoned and the metric
|
||||
// data is dropped.
|
||||
//
|
||||
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
|
||||
// environment variable is set, and this option is not passed, that variable
|
||||
// value will be used. The value will be parsed as an integer representing the
|
||||
// timeout in milliseconds. If both are set,
|
||||
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
|
||||
//
|
||||
// By default, if an environment variable is not set, and this option is not
|
||||
// passed, a timeout of 10 seconds will be used.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{oconf.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry sets the retry policy for transient retryable errors that are
|
||||
// returned by the target endpoint.
|
||||
//
|
||||
// If the target endpoint responds with not only a retryable error, but
|
||||
// explicitly returns a backoff time in the response, that time will take
|
||||
// precedence over these settings.
|
||||
//
|
||||
// If unset, the default retry policy will be used. It will retry the export
|
||||
// 5 seconds after receiving a retryable error and increase exponentially
|
||||
// after each error for no more than a total time of 1 minute.
|
||||
func WithRetry(rc RetryConfig) Option {
|
||||
return wrappedOption{oconf.WithRetry(retry.Config(rc))}
|
||||
}
|
||||
@@ -12,12 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package otlpmetrichttp provides a client that sends metrics to the collector
|
||||
using HTTP with binary protobuf payloads.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
*/
|
||||
// Package otlpmetrichttp provides an otlpmetric.Exporter that communicates
|
||||
// with an OTLP receiving endpoint using protobuf encoded metric data over
|
||||
// HTTP.
|
||||
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
|
||||
45
exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go
Normal file
45
exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package otlpmetrichttp_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
ctx := context.Background()
|
||||
exp, err := otlpmetrichttp.New(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
|
||||
defer func() {
|
||||
if err := meterProvider.Shutdown(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
global.SetMeterProvider(meterProvider)
|
||||
|
||||
// From here, the meterProvider can be used by instrumentation to collect
|
||||
// telemetry.
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
)
|
||||
|
||||
// New constructs a new Exporter and starts it.
|
||||
func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) {
|
||||
return otlpmetric.New(ctx, NewClient(opts...))
|
||||
}
|
||||
|
||||
// NewUnstarted constructs a new Exporter and does not start it.
|
||||
func NewUnstarted(opts ...Option) *otlpmetric.Exporter {
|
||||
return otlpmetric.NewUnstarted(NewClient(opts...))
|
||||
}
|
||||
@@ -1,12 +1,13 @@
|
||||
module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.7.1
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
go.opentelemetry.io/proto/otlp v0.19.0
|
||||
google.golang.org/protobuf v1.28.0
|
||||
)
|
||||
@@ -17,11 +18,11 @@ require (
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
go.opentelemetry.io/otel v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
|
||||
|
||||
@@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -114,6 +113,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
|
||||
@@ -1,239 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest"
|
||||
collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
|
||||
metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
type mockCollector struct {
|
||||
endpoint string
|
||||
server *http.Server
|
||||
|
||||
spanLock sync.Mutex
|
||||
metricsStorage otlpmetrictest.MetricsStorage
|
||||
|
||||
injectHTTPStatus []int
|
||||
injectContentType string
|
||||
delay <-chan struct{}
|
||||
|
||||
clientTLSConfig *tls.Config
|
||||
expectedHeaders map[string]string
|
||||
}
|
||||
|
||||
func (c *mockCollector) Stop() error {
|
||||
return c.server.Shutdown(context.Background())
|
||||
}
|
||||
|
||||
func (c *mockCollector) MustStop(t *testing.T) {
|
||||
assert.NoError(t, c.server.Shutdown(context.Background()))
|
||||
}
|
||||
|
||||
func (c *mockCollector) GetMetrics() []*metricpb.Metric {
|
||||
c.spanLock.Lock()
|
||||
defer c.spanLock.Unlock()
|
||||
return c.metricsStorage.GetMetrics()
|
||||
}
|
||||
|
||||
func (c *mockCollector) Endpoint() string {
|
||||
return c.endpoint
|
||||
}
|
||||
|
||||
func (c *mockCollector) ClientTLSConfig() *tls.Config {
|
||||
return c.clientTLSConfig
|
||||
}
|
||||
|
||||
func (c *mockCollector) serveMetrics(w http.ResponseWriter, r *http.Request) {
|
||||
if c.delay != nil {
|
||||
select {
|
||||
case <-c.delay:
|
||||
case <-r.Context().Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !c.checkHeaders(r) {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
response := collectormetricpb.ExportMetricsServiceResponse{}
|
||||
rawResponse, err := proto.Marshal(&response)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if injectedStatus := c.getInjectHTTPStatus(); injectedStatus != 0 {
|
||||
writeReply(w, rawResponse, injectedStatus, c.injectContentType)
|
||||
return
|
||||
}
|
||||
rawRequest, err := readRequest(r)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
request, err := unmarshalMetricsRequest(rawRequest, r.Header.Get("content-type"))
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
writeReply(w, rawResponse, 0, c.injectContentType)
|
||||
c.spanLock.Lock()
|
||||
defer c.spanLock.Unlock()
|
||||
c.metricsStorage.AddMetrics(request)
|
||||
}
|
||||
|
||||
func unmarshalMetricsRequest(rawRequest []byte, contentType string) (*collectormetricpb.ExportMetricsServiceRequest, error) {
|
||||
request := &collectormetricpb.ExportMetricsServiceRequest{}
|
||||
if contentType != "application/x-protobuf" {
|
||||
return request, fmt.Errorf("invalid content-type: %s, only application/x-protobuf is supported", contentType)
|
||||
}
|
||||
err := proto.Unmarshal(rawRequest, request)
|
||||
return request, err
|
||||
}
|
||||
|
||||
func (c *mockCollector) checkHeaders(r *http.Request) bool {
|
||||
for k, v := range c.expectedHeaders {
|
||||
got := r.Header.Get(k)
|
||||
if got != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *mockCollector) getInjectHTTPStatus() int {
|
||||
if len(c.injectHTTPStatus) == 0 {
|
||||
return 0
|
||||
}
|
||||
status := c.injectHTTPStatus[0]
|
||||
c.injectHTTPStatus = c.injectHTTPStatus[1:]
|
||||
if len(c.injectHTTPStatus) == 0 {
|
||||
c.injectHTTPStatus = nil
|
||||
}
|
||||
return status
|
||||
}
|
||||
|
||||
func readRequest(r *http.Request) ([]byte, error) {
|
||||
if r.Header.Get("Content-Encoding") == "gzip" {
|
||||
return readGzipBody(r.Body)
|
||||
}
|
||||
return io.ReadAll(r.Body)
|
||||
}
|
||||
|
||||
func readGzipBody(body io.Reader) ([]byte, error) {
|
||||
rawRequest := bytes.Buffer{}
|
||||
gunzipper, err := gzip.NewReader(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer gunzipper.Close()
|
||||
_, err = io.Copy(&rawRequest, gunzipper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rawRequest.Bytes(), nil
|
||||
}
|
||||
|
||||
func writeReply(w http.ResponseWriter, rawResponse []byte, injectHTTPStatus int, injectContentType string) {
|
||||
status := http.StatusOK
|
||||
if injectHTTPStatus != 0 {
|
||||
status = injectHTTPStatus
|
||||
}
|
||||
contentType := "application/x-protobuf"
|
||||
if injectContentType != "" {
|
||||
contentType = injectContentType
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
w.WriteHeader(status)
|
||||
_, _ = w.Write(rawResponse)
|
||||
}
|
||||
|
||||
type mockCollectorConfig struct {
|
||||
MetricsURLPath string
|
||||
Port int
|
||||
InjectHTTPStatus []int
|
||||
InjectContentType string
|
||||
Delay <-chan struct{}
|
||||
WithTLS bool
|
||||
ExpectedHeaders map[string]string
|
||||
}
|
||||
|
||||
func (c *mockCollectorConfig) fillInDefaults() {
|
||||
if c.MetricsURLPath == "" {
|
||||
c.MetricsURLPath = otlpconfig.DefaultMetricsPath
|
||||
}
|
||||
}
|
||||
|
||||
func runMockCollector(t *testing.T, cfg mockCollectorConfig) *mockCollector {
|
||||
cfg.fillInDefaults()
|
||||
ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", cfg.Port))
|
||||
require.NoError(t, err)
|
||||
_, portStr, err := net.SplitHostPort(ln.Addr().String())
|
||||
require.NoError(t, err)
|
||||
m := &mockCollector{
|
||||
endpoint: fmt.Sprintf("localhost:%s", portStr),
|
||||
metricsStorage: otlpmetrictest.NewMetricsStorage(),
|
||||
injectHTTPStatus: cfg.InjectHTTPStatus,
|
||||
injectContentType: cfg.InjectContentType,
|
||||
delay: cfg.Delay,
|
||||
expectedHeaders: cfg.ExpectedHeaders,
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle(cfg.MetricsURLPath, http.HandlerFunc(m.serveMetrics))
|
||||
server := &http.Server{
|
||||
Handler: mux,
|
||||
}
|
||||
if cfg.WithTLS {
|
||||
pem, err := generateWeakCertificate()
|
||||
require.NoError(t, err)
|
||||
tlsCertificate, err := tls.X509KeyPair(pem.Certificate, pem.PrivateKey)
|
||||
require.NoError(t, err)
|
||||
server.TLSConfig = &tls.Config{
|
||||
Certificates: []tls.Certificate{tlsCertificate},
|
||||
}
|
||||
|
||||
m.clientTLSConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
if cfg.WithTLS {
|
||||
_ = server.ServeTLS(ln, "", "")
|
||||
} else {
|
||||
_ = server.Serve(ln)
|
||||
}
|
||||
}()
|
||||
m.server = server
|
||||
return m
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
|
||||
)
|
||||
|
||||
// Compression describes the compression used for payloads sent to the
|
||||
// collector.
|
||||
type Compression otlpconfig.Compression
|
||||
|
||||
const (
|
||||
// NoCompression tells the driver to send payloads without
|
||||
// compression.
|
||||
NoCompression = Compression(otlpconfig.NoCompression)
|
||||
// GzipCompression tells the driver to send payloads after
|
||||
// compressing them with gzip.
|
||||
GzipCompression = Compression(otlpconfig.GzipCompression)
|
||||
)
|
||||
|
||||
// Option applies an option to the HTTP client.
|
||||
type Option interface {
|
||||
applyHTTPOption(otlpconfig.Config) otlpconfig.Config
|
||||
}
|
||||
|
||||
func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption {
|
||||
converted := make([]otlpconfig.HTTPOption, len(opts))
|
||||
for i, o := range opts {
|
||||
converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption)
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
// RetryConfig defines configuration for retrying batches in case of export
|
||||
// failure using an exponential backoff.
|
||||
type RetryConfig retry.Config
|
||||
|
||||
type wrappedOption struct {
|
||||
otlpconfig.HTTPOption
|
||||
}
|
||||
|
||||
func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
return w.ApplyHTTPOption(cfg)
|
||||
}
|
||||
|
||||
// WithEndpoint allows one to set the address of the collector endpoint that
|
||||
// the driver will use to send metrics. If unset, it will instead try to use
|
||||
// the default endpoint (localhost:4318). Note that the endpoint must not
|
||||
// contain any URL path.
|
||||
func WithEndpoint(endpoint string) Option {
|
||||
return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
|
||||
}
|
||||
|
||||
// WithCompression tells the driver to compress the sent data.
|
||||
func WithCompression(compression Compression) Option {
|
||||
return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
|
||||
}
|
||||
|
||||
// WithURLPath allows one to override the default URL path used
|
||||
// for sending metrics. If unset, default ("/v1/metrics") will be used.
|
||||
func WithURLPath(urlPath string) Option {
|
||||
return wrappedOption{otlpconfig.WithURLPath(urlPath)}
|
||||
}
|
||||
|
||||
// WithMaxAttempts allows one to override how many times the driver
|
||||
// will try to send the payload in case of retryable errors.
|
||||
// The max attempts is limited to at most 5 retries. If unset,
|
||||
// default (5) will be used.
|
||||
//
|
||||
// Deprecated: Use WithRetry instead.
|
||||
func WithMaxAttempts(maxAttempts int) Option {
|
||||
if maxAttempts > 5 || maxAttempts < 0 {
|
||||
maxAttempts = 5
|
||||
}
|
||||
return wrappedOption{
|
||||
otlpconfig.NewHTTPOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.RetryConfig.Enabled = true
|
||||
|
||||
var (
|
||||
init = cfg.RetryConfig.InitialInterval
|
||||
maxI = cfg.RetryConfig.MaxInterval
|
||||
maxE = cfg.RetryConfig.MaxElapsedTime
|
||||
)
|
||||
|
||||
if init == 0 {
|
||||
init = retry.DefaultConfig.InitialInterval
|
||||
}
|
||||
if maxI == 0 {
|
||||
maxI = retry.DefaultConfig.MaxInterval
|
||||
}
|
||||
if maxE == 0 {
|
||||
maxE = retry.DefaultConfig.MaxElapsedTime
|
||||
}
|
||||
attempts := int64(maxE+init) / int64(maxI)
|
||||
|
||||
if int64(maxAttempts) == attempts {
|
||||
return cfg
|
||||
}
|
||||
|
||||
maxE = time.Duration(int64(maxAttempts)*int64(maxI)) - init
|
||||
|
||||
cfg.RetryConfig.InitialInterval = init
|
||||
cfg.RetryConfig.MaxInterval = maxI
|
||||
cfg.RetryConfig.MaxElapsedTime = maxE
|
||||
|
||||
return cfg
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// WithBackoff tells the driver to use the duration as a base of the
|
||||
// exponential backoff strategy. If unset, default (300ms) will be
|
||||
// used.
|
||||
//
|
||||
// Deprecated: Use WithRetry instead.
|
||||
func WithBackoff(duration time.Duration) Option {
|
||||
if duration < 0 {
|
||||
duration = 300 * time.Millisecond
|
||||
}
|
||||
return wrappedOption{
|
||||
otlpconfig.NewHTTPOption(func(cfg otlpconfig.Config) otlpconfig.Config {
|
||||
cfg.RetryConfig.Enabled = true
|
||||
cfg.RetryConfig.MaxInterval = duration
|
||||
if cfg.RetryConfig.InitialInterval == 0 {
|
||||
cfg.RetryConfig.InitialInterval = retry.DefaultConfig.InitialInterval
|
||||
}
|
||||
if cfg.RetryConfig.MaxElapsedTime == 0 {
|
||||
cfg.RetryConfig.MaxElapsedTime = retry.DefaultConfig.MaxElapsedTime
|
||||
}
|
||||
return cfg
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// WithTLSClientConfig can be used to set up a custom TLS
|
||||
// configuration for the client used to send payloads to the
|
||||
// collector. Use it if you want to use a custom certificate.
|
||||
func WithTLSClientConfig(tlsCfg *tls.Config) Option {
|
||||
return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
|
||||
}
|
||||
|
||||
// WithInsecure tells the driver to connect to the collector using the
|
||||
// HTTP scheme, instead of HTTPS.
|
||||
func WithInsecure() Option {
|
||||
return wrappedOption{otlpconfig.WithInsecure()}
|
||||
}
|
||||
|
||||
// WithHeaders allows one to tell the driver to send additional HTTP
|
||||
// headers with the payloads. Specifying headers like Content-Length,
|
||||
// Content-Encoding and Content-Type may result in a broken driver.
|
||||
func WithHeaders(headers map[string]string) Option {
|
||||
return wrappedOption{otlpconfig.WithHeaders(headers)}
|
||||
}
|
||||
|
||||
// WithTimeout tells the driver the max waiting time for the backend to process
|
||||
// each metrics batch. If unset, the default will be 10 seconds.
|
||||
func WithTimeout(duration time.Duration) Option {
|
||||
return wrappedOption{otlpconfig.WithTimeout(duration)}
|
||||
}
|
||||
|
||||
// WithRetry configures the retry policy for transient errors that may occurs
|
||||
// when exporting traces. An exponential back-off algorithm is used to ensure
|
||||
// endpoints are not overwhelmed with retries. If unset, the default retry
|
||||
// policy will retry after 5 seconds and increase exponentially after each
|
||||
// error for a total of 1 minute.
|
||||
func WithRetry(rc RetryConfig) Option {
|
||||
return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
# OpenTelemetry-Go Prometheus Exporter
|
||||
|
||||
OpenTelemetry Prometheus exporter
|
||||
|
||||
## Installation
|
||||
|
||||
```
|
||||
go get -u go.opentelemetry.io/otel/exporters/prometheus
|
||||
```
|
||||
@@ -12,12 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*
|
||||
Package number provides a number abstraction for instruments that
|
||||
either support int64 or float64 input values.
|
||||
|
||||
This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
may be introduced in subsequent minor version releases as we work to track the
|
||||
evolving OpenTelemetry specification and user feedback.
|
||||
*/
|
||||
package number // import "go.opentelemetry.io/otel/sdk/metric/number"
|
||||
// Package prometheus provides a Prometheus Exporter that converts
|
||||
// OTLP metrics into the Prometheus exposition format and implements
|
||||
// prometheus.Collector to provide a handler for these metrics.
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
233
exporters/prometheus/exporter.go
Normal file
233
exporters/prometheus/exporter.go
Normal file
@@ -0,0 +1,233 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
|
||||
// interface for easy instantiation with a MeterProvider.
|
||||
type Exporter struct {
|
||||
metric.Reader
|
||||
Collector prometheus.Collector
|
||||
}
|
||||
|
||||
// collector is used to implement prometheus.Collector.
|
||||
type collector struct {
|
||||
metric.Reader
|
||||
}
|
||||
|
||||
// config is added here to allow for options expansion in the future.
|
||||
type config struct{}
|
||||
|
||||
// Option may be used in the future to apply options to a Prometheus Exporter config.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
// New returns a Prometheus Exporter.
|
||||
func New(_ ...Option) Exporter {
|
||||
// this assumes that the default temporality selector will always return cumulative.
|
||||
// we only support cumulative temporality, so building our own reader enforces this.
|
||||
reader := metric.NewManualReader()
|
||||
e := Exporter{
|
||||
Reader: reader,
|
||||
Collector: &collector{
|
||||
Reader: reader,
|
||||
},
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Describe implements prometheus.Collector.
|
||||
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
metrics, err := c.Reader.Collect(context.TODO())
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
for _, metricData := range getMetricData(metrics) {
|
||||
ch <- metricData.description
|
||||
}
|
||||
}
|
||||
|
||||
// Collect implements prometheus.Collector.
|
||||
func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
||||
metrics, err := c.Reader.Collect(context.TODO())
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
|
||||
// TODO(#3166): convert otel resource to target_info
|
||||
// see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#resource-attributes-1
|
||||
for _, metricData := range getMetricData(metrics) {
|
||||
if metricData.valueType == prometheus.UntypedValue {
|
||||
m, err := prometheus.NewConstHistogram(metricData.description, metricData.histogramCount, metricData.histogramSum, metricData.histogramBuckets, metricData.attributeValues...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
ch <- m
|
||||
} else {
|
||||
m, err := prometheus.NewConstMetric(metricData.description, metricData.valueType, metricData.value, metricData.attributeValues...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
ch <- m
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// metricData holds the metadata as well as values for individual data points.
|
||||
type metricData struct {
|
||||
// name should include the unit as a suffix (before _total on counters)
|
||||
// see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata-1
|
||||
name string
|
||||
description *prometheus.Desc
|
||||
attributeValues []string
|
||||
valueType prometheus.ValueType
|
||||
value float64
|
||||
histogramCount uint64
|
||||
histogramSum float64
|
||||
histogramBuckets map[float64]uint64
|
||||
}
|
||||
|
||||
func getMetricData(metrics metricdata.ResourceMetrics) []*metricData {
|
||||
allMetrics := make([]*metricData, 0)
|
||||
for _, scopeMetrics := range metrics.ScopeMetrics {
|
||||
for _, m := range scopeMetrics.Metrics {
|
||||
switch v := m.Data.(type) {
|
||||
case metricdata.Histogram:
|
||||
allMetrics = append(allMetrics, getHistogramMetricData(v, m)...)
|
||||
case metricdata.Sum[int64]:
|
||||
allMetrics = append(allMetrics, getSumMetricData(v, m)...)
|
||||
case metricdata.Sum[float64]:
|
||||
allMetrics = append(allMetrics, getSumMetricData(v, m)...)
|
||||
case metricdata.Gauge[int64]:
|
||||
allMetrics = append(allMetrics, getGaugeMetricData(v, m)...)
|
||||
case metricdata.Gauge[float64]:
|
||||
allMetrics = append(allMetrics, getGaugeMetricData(v, m)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allMetrics
|
||||
}
|
||||
|
||||
func getHistogramMetricData(histogram metricdata.Histogram, m metricdata.Metrics) []*metricData {
|
||||
// TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars
|
||||
dataPoints := make([]*metricData, 0, len(histogram.DataPoints))
|
||||
for _, dp := range histogram.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes)
|
||||
desc := prometheus.NewDesc(m.Name, m.Description, keys, nil)
|
||||
buckets := make(map[float64]uint64, len(dp.Bounds))
|
||||
for i, bound := range dp.Bounds {
|
||||
buckets[bound] = dp.BucketCounts[i]
|
||||
}
|
||||
md := &metricData{
|
||||
name: m.Name,
|
||||
description: desc,
|
||||
attributeValues: values,
|
||||
valueType: prometheus.UntypedValue,
|
||||
histogramCount: dp.Count,
|
||||
histogramSum: dp.Sum,
|
||||
histogramBuckets: buckets,
|
||||
}
|
||||
dataPoints = append(dataPoints, md)
|
||||
}
|
||||
return dataPoints
|
||||
}
|
||||
|
||||
func getSumMetricData[N int64 | float64](sum metricdata.Sum[N], m metricdata.Metrics) []*metricData {
|
||||
dataPoints := make([]*metricData, 0, len(sum.DataPoints))
|
||||
for _, dp := range sum.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes)
|
||||
desc := prometheus.NewDesc(m.Name, m.Description, keys, nil)
|
||||
md := &metricData{
|
||||
name: m.Name,
|
||||
description: desc,
|
||||
attributeValues: values,
|
||||
valueType: prometheus.CounterValue,
|
||||
value: float64(dp.Value),
|
||||
}
|
||||
dataPoints = append(dataPoints, md)
|
||||
}
|
||||
return dataPoints
|
||||
}
|
||||
|
||||
func getGaugeMetricData[N int64 | float64](gauge metricdata.Gauge[N], m metricdata.Metrics) []*metricData {
|
||||
dataPoints := make([]*metricData, 0, len(gauge.DataPoints))
|
||||
for _, dp := range gauge.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes)
|
||||
desc := prometheus.NewDesc(m.Name, m.Description, keys, nil)
|
||||
md := &metricData{
|
||||
name: m.Name,
|
||||
description: desc,
|
||||
attributeValues: values,
|
||||
valueType: prometheus.GaugeValue,
|
||||
value: float64(dp.Value),
|
||||
}
|
||||
dataPoints = append(dataPoints, md)
|
||||
}
|
||||
return dataPoints
|
||||
}
|
||||
|
||||
// getAttrs parses the attribute.Set to two lists of matching Prometheus-style
|
||||
// keys and values. It sanitizes invalid characters and handles duplicate keys
|
||||
// (due to sanitization) by sorting and concatenating the values following the spec.
|
||||
func getAttrs(attrs attribute.Set) ([]string, []string) {
|
||||
keysMap := make(map[string][]string)
|
||||
itr := attrs.Iter()
|
||||
for itr.Next() {
|
||||
kv := itr.Attribute()
|
||||
key := strings.Map(sanitizeRune, string(kv.Key))
|
||||
if _, ok := keysMap[key]; !ok {
|
||||
keysMap[key] = []string{kv.Value.AsString()}
|
||||
} else {
|
||||
// if the sanitized key is a duplicate, append to the list of keys
|
||||
keysMap[key] = append(keysMap[key], kv.Value.AsString())
|
||||
}
|
||||
}
|
||||
|
||||
keys := make([]string, 0, attrs.Len())
|
||||
values := make([]string, 0, attrs.Len())
|
||||
for key, vals := range keysMap {
|
||||
keys = append(keys, key)
|
||||
sort.Slice(vals, func(i, j int) bool {
|
||||
return i < j
|
||||
})
|
||||
values = append(values, strings.Join(vals, ";"))
|
||||
}
|
||||
return keys, values
|
||||
}
|
||||
|
||||
func sanitizeRune(r rune) rune {
|
||||
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' {
|
||||
return r
|
||||
}
|
||||
return '_'
|
||||
}
|
||||
130
exporters/prometheus/exporter_test.go
Normal file
130
exporters/prometheus/exporter_test.go
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
otelmetric "go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
func TestPrometheusExporter(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
recordMetrics func(ctx context.Context, meter otelmetric.Meter)
|
||||
expectedFile string
|
||||
}{
|
||||
{
|
||||
name: "counter",
|
||||
expectedFile: "testdata/counter.txt",
|
||||
recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.Key("A").String("B"),
|
||||
attribute.Key("C").String("D"),
|
||||
}
|
||||
counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a simple counter"))
|
||||
require.NoError(t, err)
|
||||
counter.Add(ctx, 5, attrs...)
|
||||
counter.Add(ctx, 10.3, attrs...)
|
||||
counter.Add(ctx, 9, attrs...)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "gauge",
|
||||
expectedFile: "testdata/gauge.txt",
|
||||
recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.Key("A").String("B"),
|
||||
attribute.Key("C").String("D"),
|
||||
}
|
||||
gauge, err := meter.SyncFloat64().UpDownCounter("bar", instrument.WithDescription("a fun little gauge"))
|
||||
require.NoError(t, err)
|
||||
gauge.Add(ctx, 100, attrs...)
|
||||
gauge.Add(ctx, -25, attrs...)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "histogram",
|
||||
expectedFile: "testdata/histogram.txt",
|
||||
recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.Key("A").String("B"),
|
||||
attribute.Key("C").String("D"),
|
||||
}
|
||||
histogram, err := meter.SyncFloat64().Histogram("baz", instrument.WithDescription("a very nice histogram"))
|
||||
require.NoError(t, err)
|
||||
histogram.Record(ctx, 23, attrs...)
|
||||
histogram.Record(ctx, 7, attrs...)
|
||||
histogram.Record(ctx, 101, attrs...)
|
||||
histogram.Record(ctx, 105, attrs...)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sanitized attributes to labels",
|
||||
expectedFile: "testdata/sanitized_labels.txt",
|
||||
recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
|
||||
attrs := []attribute.KeyValue{
|
||||
// exact match, value should be overwritten
|
||||
attribute.Key("A.B").String("X"),
|
||||
attribute.Key("A.B").String("Q"),
|
||||
|
||||
// unintended match due to sanitization, values should be concatenated
|
||||
attribute.Key("C.D").String("Y"),
|
||||
attribute.Key("C/D").String("Z"),
|
||||
}
|
||||
counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a sanitary counter"))
|
||||
require.NoError(t, err)
|
||||
counter.Add(ctx, 5, attrs...)
|
||||
counter.Add(ctx, 10.3, attrs...)
|
||||
counter.Add(ctx, 9, attrs...)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
exporter := New()
|
||||
provider := metric.NewMeterProvider(metric.WithReader(exporter))
|
||||
meter := provider.Meter("testmeter")
|
||||
|
||||
registry := prometheus.NewRegistry()
|
||||
err := registry.Register(exporter.Collector)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.recordMetrics(ctx, meter)
|
||||
|
||||
file, err := os.Open(tc.expectedFile)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { require.NoError(t, file.Close()) })
|
||||
|
||||
err = testutil.GatherAndCompare(registry, file)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,12 @@
|
||||
module go.opentelemetry.io/otel/exporters/prometheus
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
)
|
||||
|
||||
@@ -21,20 +20,21 @@ require (
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.10.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.10.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
google.golang.org/protobuf v1.26.0 // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||
)
|
||||
|
||||
replace go.opentelemetry.io/otel => ../..
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../sdk
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../trace
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../metric
|
||||
|
||||
@@ -38,7 +38,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -65,9 +64,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
@@ -167,8 +168,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -177,14 +179,16 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -269,12 +273,15 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -319,15 +326,20 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -449,8 +461,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
@@ -1,324 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
|
||||
// Note that this package does not support a way to export Prometheus
|
||||
// Summary data points, removed in PR#1412.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/number"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// Exporter supports Prometheus pulls. It does not implement the
|
||||
// sdk/export/metric.Exporter interface--instead it creates a pull
|
||||
// controller and reads the latest checkpointed data on-scrape.
|
||||
type Exporter struct {
|
||||
handler http.Handler
|
||||
|
||||
registerer prometheus.Registerer
|
||||
gatherer prometheus.Gatherer
|
||||
|
||||
// lock protects access to the controller. The controller
|
||||
// exposes its own lock, but using a dedicated lock in this
|
||||
// struct allows the exporter to potentially support multiple
|
||||
// controllers (e.g., with different resources).
|
||||
lock sync.RWMutex
|
||||
controller *controller.Controller
|
||||
}
|
||||
|
||||
// ErrUnsupportedAggregator is returned for unrepresentable aggregator
|
||||
// types.
|
||||
var ErrUnsupportedAggregator = fmt.Errorf("unsupported aggregator type")
|
||||
|
||||
var _ http.Handler = &Exporter{}
|
||||
|
||||
// Config is a set of configs for the tally reporter.
|
||||
type Config struct {
|
||||
// Registry is the prometheus registry that will be used as the default Registerer and
|
||||
// Gatherer if these are not specified.
|
||||
//
|
||||
// If not set a new empty Registry is created.
|
||||
Registry *prometheus.Registry
|
||||
|
||||
// Registerer is the prometheus registerer to register
|
||||
// metrics with.
|
||||
//
|
||||
// If not specified the Registry will be used as default.
|
||||
Registerer prometheus.Registerer
|
||||
|
||||
// Gatherer is the prometheus gatherer to gather
|
||||
// metrics with.
|
||||
//
|
||||
// If not specified the Registry will be used as default.
|
||||
Gatherer prometheus.Gatherer
|
||||
|
||||
// DefaultHistogramBoundaries defines the default histogram bucket
|
||||
// boundaries.
|
||||
DefaultHistogramBoundaries []float64
|
||||
}
|
||||
|
||||
// New returns a new Prometheus exporter using the configured metric
|
||||
// controller. See controller.New().
|
||||
func New(config Config, ctrl *controller.Controller) (*Exporter, error) {
|
||||
if config.Registry == nil {
|
||||
config.Registry = prometheus.NewRegistry()
|
||||
}
|
||||
|
||||
if config.Registerer == nil {
|
||||
config.Registerer = config.Registry
|
||||
}
|
||||
|
||||
if config.Gatherer == nil {
|
||||
config.Gatherer = config.Registry
|
||||
}
|
||||
|
||||
e := &Exporter{
|
||||
handler: promhttp.HandlerFor(config.Gatherer, promhttp.HandlerOpts{}),
|
||||
registerer: config.Registerer,
|
||||
gatherer: config.Gatherer,
|
||||
controller: ctrl,
|
||||
}
|
||||
|
||||
c := &collector{
|
||||
exp: e,
|
||||
}
|
||||
if err := config.Registerer.Register(c); err != nil {
|
||||
return nil, fmt.Errorf("cannot register the collector: %w", err)
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// MeterProvider returns the MeterProvider of this exporter.
|
||||
func (e *Exporter) MeterProvider() metric.MeterProvider {
|
||||
return e.controller
|
||||
}
|
||||
|
||||
// Controller returns the controller object that coordinates collection for the SDK.
|
||||
func (e *Exporter) Controller() *controller.Controller {
|
||||
e.lock.RLock()
|
||||
defer e.lock.RUnlock()
|
||||
return e.controller
|
||||
}
|
||||
|
||||
// TemporalityFor implements TemporalitySelector.
|
||||
func (e *Exporter) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
|
||||
return aggregation.CumulativeTemporalitySelector().TemporalityFor(desc, kind)
|
||||
}
|
||||
|
||||
// ServeHTTP implements http.Handler.
|
||||
func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
e.handler.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// collector implements prometheus.Collector interface.
|
||||
type collector struct {
|
||||
exp *Exporter
|
||||
}
|
||||
|
||||
var _ prometheus.Collector = (*collector)(nil)
|
||||
|
||||
// Describe implements prometheus.Collector.
|
||||
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
c.exp.lock.RLock()
|
||||
defer c.exp.lock.RUnlock()
|
||||
|
||||
_ = c.exp.Controller().ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||
var attrKeys []string
|
||||
mergeAttrs(record, c.exp.controller.Resource(), &attrKeys, nil)
|
||||
ch <- c.toDesc(record, attrKeys)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Collect exports the last calculated Reader state.
|
||||
//
|
||||
// Collect is invoked whenever prometheus.Gatherer is also invoked.
|
||||
// For example, when the HTTP endpoint is invoked by Prometheus.
|
||||
func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
||||
c.exp.lock.RLock()
|
||||
defer c.exp.lock.RUnlock()
|
||||
|
||||
ctrl := c.exp.Controller()
|
||||
if err := ctrl.Collect(context.Background()); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
|
||||
err := ctrl.ForEach(func(_ instrumentation.Library, reader export.Reader) error {
|
||||
return reader.ForEach(c.exp, func(record export.Record) error {
|
||||
agg := record.Aggregation()
|
||||
numberKind := record.Descriptor().NumberKind()
|
||||
instrumentKind := record.Descriptor().InstrumentKind()
|
||||
|
||||
var attrKeys, attrs []string
|
||||
mergeAttrs(record, c.exp.controller.Resource(), &attrKeys, &attrs)
|
||||
|
||||
desc := c.toDesc(record, attrKeys)
|
||||
|
||||
switch v := agg.(type) {
|
||||
case aggregation.Histogram:
|
||||
if err := c.exportHistogram(ch, v, numberKind, desc, attrs); err != nil {
|
||||
return fmt.Errorf("exporting histogram: %w", err)
|
||||
}
|
||||
case aggregation.Sum:
|
||||
if instrumentKind.Monotonic() {
|
||||
if err := c.exportMonotonicCounter(ch, v, numberKind, desc, attrs); err != nil {
|
||||
return fmt.Errorf("exporting monotonic counter: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err := c.exportNonMonotonicCounter(ch, v, numberKind, desc, attrs); err != nil {
|
||||
return fmt.Errorf("exporting non monotonic counter: %w", err)
|
||||
}
|
||||
}
|
||||
case aggregation.LastValue:
|
||||
if err := c.exportLastValue(ch, v, numberKind, desc, attrs); err != nil {
|
||||
return fmt.Errorf("exporting last value: %w", err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregation.LastValue, kind number.Kind, desc *prometheus.Desc, attrs []string) error {
|
||||
lv, _, err := lvagg.LastValue()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving last value: %w", err)
|
||||
}
|
||||
|
||||
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, lv.CoerceToFloat64(kind), attrs...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating constant metric: %w", err)
|
||||
}
|
||||
|
||||
ch <- m
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) exportNonMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, attrs []string) error {
|
||||
v, err := sum.Sum()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving counter: %w", err)
|
||||
}
|
||||
|
||||
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v.CoerceToFloat64(kind), attrs...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating constant metric: %w", err)
|
||||
}
|
||||
|
||||
ch <- m
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) exportMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, attrs []string) error {
|
||||
v, err := sum.Sum()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving counter: %w", err)
|
||||
}
|
||||
|
||||
m, err := prometheus.NewConstMetric(desc, prometheus.CounterValue, v.CoerceToFloat64(kind), attrs...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating constant metric: %w", err)
|
||||
}
|
||||
|
||||
ch <- m
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind number.Kind, desc *prometheus.Desc, attrs []string) error {
|
||||
buckets, err := hist.Histogram()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving histogram: %w", err)
|
||||
}
|
||||
sum, err := hist.Sum()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving sum: %w", err)
|
||||
}
|
||||
|
||||
var totalCount uint64
|
||||
// counts maps from the bucket upper-bound to the cumulative count.
|
||||
// The bucket with upper-bound +inf is not included.
|
||||
counts := make(map[float64]uint64, len(buckets.Boundaries))
|
||||
for i := range buckets.Boundaries {
|
||||
boundary := buckets.Boundaries[i]
|
||||
totalCount += uint64(buckets.Counts[i])
|
||||
counts[boundary] = totalCount
|
||||
}
|
||||
// Include the +inf bucket in the total count.
|
||||
totalCount += uint64(buckets.Counts[len(buckets.Counts)-1])
|
||||
|
||||
m, err := prometheus.NewConstHistogram(desc, totalCount, sum.CoerceToFloat64(kind), counts, attrs...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating constant histogram: %w", err)
|
||||
}
|
||||
|
||||
ch <- m
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) toDesc(record export.Record, attrKeys []string) *prometheus.Desc {
|
||||
desc := record.Descriptor()
|
||||
return prometheus.NewDesc(sanitize(desc.Name()), desc.Description(), attrKeys, nil)
|
||||
}
|
||||
|
||||
// mergeAttrs merges the export.Record's attributes and resources into a
|
||||
// single set, giving precedence to the record's attributes in case of
|
||||
// duplicate keys. This outputs one or both of the keys and the values as a
|
||||
// slice, and either argument may be nil to avoid allocating an unnecessary
|
||||
// slice.
|
||||
func mergeAttrs(record export.Record, res *resource.Resource, keys, values *[]string) {
|
||||
if keys != nil {
|
||||
*keys = make([]string, 0, record.Attributes().Len()+res.Len())
|
||||
}
|
||||
if values != nil {
|
||||
*values = make([]string, 0, record.Attributes().Len()+res.Len())
|
||||
}
|
||||
|
||||
// Duplicate keys are resolved by taking the record attribute value over
|
||||
// the resource value.
|
||||
mi := attribute.NewMergeIterator(record.Attributes(), res.Set())
|
||||
for mi.Next() {
|
||||
attr := mi.Attribute()
|
||||
if keys != nil {
|
||||
*keys = append(*keys, sanitize(string(attr.Key)))
|
||||
}
|
||||
if values != nil {
|
||||
*values = append(*values, attr.Value.Emit())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,228 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http/httptest"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/prometheus"
|
||||
"go.opentelemetry.io/otel/metric/instrument"
|
||||
"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
type expectedMetric struct {
|
||||
kind string
|
||||
name string
|
||||
help string
|
||||
values []string
|
||||
}
|
||||
|
||||
func (e *expectedMetric) lines() []string {
|
||||
ret := []string{
|
||||
fmt.Sprintf("# HELP %s %s", e.name, e.help),
|
||||
fmt.Sprintf("# TYPE %s %s", e.name, e.kind),
|
||||
}
|
||||
|
||||
ret = append(ret, e.values...)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func expectCounterWithHelp(name, help, value string) expectedMetric {
|
||||
return expectedMetric{
|
||||
kind: "counter",
|
||||
name: name,
|
||||
help: help,
|
||||
values: []string{value},
|
||||
}
|
||||
}
|
||||
|
||||
func expectCounter(name, value string) expectedMetric {
|
||||
return expectCounterWithHelp(name, "", value)
|
||||
}
|
||||
|
||||
func expectGauge(name, value string) expectedMetric {
|
||||
return expectedMetric{
|
||||
kind: "gauge",
|
||||
name: name,
|
||||
values: []string{value},
|
||||
}
|
||||
}
|
||||
|
||||
func expectHistogram(name string, values ...string) expectedMetric {
|
||||
return expectedMetric{
|
||||
kind: "histogram",
|
||||
name: name,
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) {
|
||||
c := controller.New(
|
||||
processor.NewFactory(
|
||||
selector.NewWithHistogramDistribution(
|
||||
histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
|
||||
),
|
||||
aggregation.CumulativeTemporalitySelector(),
|
||||
processor.WithMemory(true),
|
||||
),
|
||||
options...,
|
||||
)
|
||||
return prometheus.New(config, c)
|
||||
}
|
||||
|
||||
func TestPrometheusExporter(t *testing.T) {
|
||||
exporter, err := newPipeline(
|
||||
prometheus.Config{
|
||||
DefaultHistogramBoundaries: []float64{-0.5, 1},
|
||||
},
|
||||
controller.WithCollectPeriod(0),
|
||||
controller.WithResource(resource.NewSchemaless(attribute.String("R", "V"))),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
meter := exporter.MeterProvider().Meter("test")
|
||||
upDownCounter, err := meter.SyncFloat64().UpDownCounter("updowncounter")
|
||||
require.NoError(t, err)
|
||||
counter, err := meter.SyncFloat64().Counter("counter")
|
||||
require.NoError(t, err)
|
||||
hist, err := meter.SyncFloat64().Histogram("histogram")
|
||||
require.NoError(t, err)
|
||||
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.Key("A").String("B"),
|
||||
attribute.Key("C").String("D"),
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
var expected []expectedMetric
|
||||
|
||||
counter.Add(ctx, 10, attrs...)
|
||||
counter.Add(ctx, 5.3, attrs...)
|
||||
|
||||
expected = append(expected, expectCounter("counter", `counter{A="B",C="D",R="V"} 15.3`))
|
||||
|
||||
gaugeObserver, err := meter.AsyncInt64().Gauge("intgaugeobserver")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = meter.RegisterCallback([]instrument.Asynchronous{gaugeObserver}, func(ctx context.Context) {
|
||||
gaugeObserver.Observe(ctx, 1, attrs...)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
expected = append(expected, expectGauge("intgaugeobserver", `intgaugeobserver{A="B",C="D",R="V"} 1`))
|
||||
|
||||
hist.Record(ctx, -0.6, attrs...)
|
||||
hist.Record(ctx, -0.4, attrs...)
|
||||
hist.Record(ctx, 0.6, attrs...)
|
||||
hist.Record(ctx, 20, attrs...)
|
||||
|
||||
expected = append(expected, expectHistogram("histogram",
|
||||
`histogram_bucket{A="B",C="D",R="V",le="-0.5"} 1`,
|
||||
`histogram_bucket{A="B",C="D",R="V",le="1"} 3`,
|
||||
`histogram_bucket{A="B",C="D",R="V",le="+Inf"} 4`,
|
||||
`histogram_sum{A="B",C="D",R="V"} 19.6`,
|
||||
`histogram_count{A="B",C="D",R="V"} 4`,
|
||||
))
|
||||
|
||||
upDownCounter.Add(ctx, 10, attrs...)
|
||||
upDownCounter.Add(ctx, -3.2, attrs...)
|
||||
|
||||
expected = append(expected, expectGauge("updowncounter", `updowncounter{A="B",C="D",R="V"} 6.8`))
|
||||
|
||||
counterObserver, err := meter.AsyncFloat64().Counter("floatcounterobserver")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) {
|
||||
counterObserver.Observe(ctx, 7.7, attrs...)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
expected = append(expected, expectCounter("floatcounterobserver", `floatcounterobserver{A="B",C="D",R="V"} 7.7`))
|
||||
|
||||
upDownCounterObserver, err := meter.AsyncFloat64().UpDownCounter("floatupdowncounterobserver")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = meter.RegisterCallback([]instrument.Asynchronous{upDownCounterObserver}, func(ctx context.Context) {
|
||||
upDownCounterObserver.Observe(ctx, -7.7, attrs...)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
expected = append(expected, expectGauge("floatupdowncounterobserver", `floatupdowncounterobserver{A="B",C="D",R="V"} -7.7`))
|
||||
|
||||
compareExport(t, exporter, expected)
|
||||
compareExport(t, exporter, expected)
|
||||
}
|
||||
|
||||
func compareExport(t *testing.T, exporter *prometheus.Exporter, expected []expectedMetric) {
|
||||
rec := httptest.NewRecorder()
|
||||
req := httptest.NewRequest("GET", "/metrics", nil)
|
||||
exporter.ServeHTTP(rec, req)
|
||||
|
||||
output := rec.Body.String()
|
||||
lines := strings.Split(output, "\n")
|
||||
|
||||
expectedLines := []string{""}
|
||||
for _, v := range expected {
|
||||
expectedLines = append(expectedLines, v.lines()...)
|
||||
}
|
||||
|
||||
sort.Strings(lines)
|
||||
sort.Strings(expectedLines)
|
||||
|
||||
require.Equal(t, expectedLines, lines)
|
||||
}
|
||||
|
||||
func TestPrometheusStatefulness(t *testing.T) {
|
||||
// Create a meter
|
||||
exporter, err := newPipeline(
|
||||
prometheus.Config{},
|
||||
controller.WithCollectPeriod(0),
|
||||
controller.WithResource(resource.Empty()),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
meter := exporter.MeterProvider().Meter("test")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
counter, err := meter.SyncInt64().Counter("a.counter", instrument.WithDescription("Counts things"))
|
||||
require.NoError(t, err)
|
||||
|
||||
counter.Add(ctx, 100, attribute.String("key", "value"))
|
||||
|
||||
compareExport(t, exporter, []expectedMetric{
|
||||
expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 100`),
|
||||
})
|
||||
|
||||
counter.Add(ctx, 100, attribute.String("key", "value"))
|
||||
|
||||
compareExport(t, exporter, []expectedMetric{
|
||||
expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 200`),
|
||||
})
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// TODO(paivagustavo): we should provide a more uniform and controlled way of sanitizing.
|
||||
// Letting users define wether we should try or not to sanitize metric names.
|
||||
// This is a copy of sdk/internal/sanitize.go
|
||||
|
||||
// sanitize returns a string that is truncated to 100 characters if it's too
|
||||
// long, and replaces non-alphanumeric characters to underscores.
|
||||
func sanitize(s string) string {
|
||||
if len(s) == 0 {
|
||||
return s
|
||||
}
|
||||
// TODO(paivagustavo): change this to use a bytes buffer to avoid a large number of string allocations.
|
||||
s = strings.Map(sanitizeRune, s)
|
||||
if unicode.IsDigit(rune(s[0])) {
|
||||
s = "key_" + s
|
||||
}
|
||||
if s[0] == '_' {
|
||||
s = "key" + s
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// converts anything that is not a letter or digit to an underscore.
|
||||
func sanitizeRune(r rune) rune {
|
||||
if unicode.IsLetter(r) || unicode.IsDigit(r) {
|
||||
return r
|
||||
}
|
||||
// Everything else turns into an underscore
|
||||
return '_'
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSanitize(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "replace character",
|
||||
input: "test/key-1",
|
||||
want: "test_key_1",
|
||||
},
|
||||
{
|
||||
name: "add prefix if starting with digit",
|
||||
input: "0123456789",
|
||||
want: "key_0123456789",
|
||||
},
|
||||
{
|
||||
name: "add prefix if starting with _",
|
||||
input: "_0123456789",
|
||||
want: "key_0123456789",
|
||||
},
|
||||
{
|
||||
name: "starts with _ after sanitization",
|
||||
input: "/0123456789",
|
||||
want: "key_0123456789",
|
||||
},
|
||||
{
|
||||
name: "valid input",
|
||||
input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789",
|
||||
want: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got, want := sanitize(tt.input), tt.want; got != want {
|
||||
t.Errorf("sanitize() = %q; want %q", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
3
exporters/prometheus/testdata/counter.txt
vendored
Executable file
3
exporters/prometheus/testdata/counter.txt
vendored
Executable file
@@ -0,0 +1,3 @@
|
||||
# HELP foo a simple counter
|
||||
# TYPE foo counter
|
||||
foo{A="B",C="D"} 24.3
|
||||
3
exporters/prometheus/testdata/gauge.txt
vendored
Normal file
3
exporters/prometheus/testdata/gauge.txt
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# HELP bar a fun little gauge
|
||||
# TYPE bar counter
|
||||
bar{A="B",C="D"} 75
|
||||
15
exporters/prometheus/testdata/histogram.txt
vendored
Normal file
15
exporters/prometheus/testdata/histogram.txt
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# HELP baz a very nice histogram
|
||||
# TYPE baz histogram
|
||||
baz_bucket{A="B",C="D",le="0"} 0
|
||||
baz_bucket{A="B",C="D",le="5"} 0
|
||||
baz_bucket{A="B",C="D",le="10"} 1
|
||||
baz_bucket{A="B",C="D",le="25"} 1
|
||||
baz_bucket{A="B",C="D",le="50"} 0
|
||||
baz_bucket{A="B",C="D",le="75"} 0
|
||||
baz_bucket{A="B",C="D",le="100"} 0
|
||||
baz_bucket{A="B",C="D",le="250"} 2
|
||||
baz_bucket{A="B",C="D",le="500"} 0
|
||||
baz_bucket{A="B",C="D",le="1000"} 0
|
||||
baz_bucket{A="B",C="D",le="+Inf"} 4
|
||||
baz_sum{A="B",C="D"} 236
|
||||
baz_count{A="B",C="D"} 4
|
||||
3
exporters/prometheus/testdata/sanitized_labels.txt
vendored
Executable file
3
exporters/prometheus/testdata/sanitized_labels.txt
vendored
Executable file
@@ -0,0 +1,3 @@
|
||||
# HELP foo a sanitary counter
|
||||
# TYPE foo counter
|
||||
foo{A_B="Q",C_D="Y;Z"} 24.3
|
||||
@@ -1,5 +1,4 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
@@ -12,106 +11,55 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
import (
|
||||
"io"
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultWriter = os.Stdout
|
||||
defaultPrettyPrint = false
|
||||
defaultTimestamps = true
|
||||
defaultAttrEncoder = attribute.DefaultEncoder()
|
||||
)
|
||||
|
||||
// config contains options for the STDOUT exporter.
|
||||
// config contains options for the exporter.
|
||||
type config struct {
|
||||
// Writer is the destination. If not set, os.Stdout is used.
|
||||
Writer io.Writer
|
||||
|
||||
// PrettyPrint will encode the output into readable JSON. Default is
|
||||
// false.
|
||||
PrettyPrint bool
|
||||
|
||||
// Timestamps specifies if timestamps should be printed. Default is
|
||||
// true.
|
||||
Timestamps bool
|
||||
|
||||
// Encoder encodes the attributes.
|
||||
Encoder attribute.Encoder
|
||||
encoder *encoderHolder
|
||||
}
|
||||
|
||||
// newConfig creates a validated Config configured with options.
|
||||
func newConfig(options ...Option) (config, error) {
|
||||
cfg := config{
|
||||
Writer: defaultWriter,
|
||||
PrettyPrint: defaultPrettyPrint,
|
||||
Timestamps: defaultTimestamps,
|
||||
Encoder: defaultAttrEncoder,
|
||||
}
|
||||
// newConfig creates a validated config configured with options.
|
||||
func newConfig(options ...Option) config {
|
||||
cfg := config{}
|
||||
for _, opt := range options {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
return cfg, nil
|
||||
|
||||
if cfg.encoder == nil {
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", "\t")
|
||||
cfg.encoder = &encoderHolder{encoder: enc}
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Option sets the value of an option for a Config.
|
||||
// Option sets exporter option values.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
// WithWriter sets the export stream destination.
|
||||
func WithWriter(w io.Writer) Option {
|
||||
return writerOption{w}
|
||||
type optionFunc func(config) config
|
||||
|
||||
func (o optionFunc) apply(c config) config {
|
||||
return o(c)
|
||||
}
|
||||
|
||||
type writerOption struct {
|
||||
W io.Writer
|
||||
}
|
||||
|
||||
func (o writerOption) apply(cfg config) config {
|
||||
cfg.Writer = o.W
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithPrettyPrint sets the export stream format to use JSON.
|
||||
func WithPrettyPrint() Option {
|
||||
return prettyPrintOption(true)
|
||||
}
|
||||
|
||||
type prettyPrintOption bool
|
||||
|
||||
func (o prettyPrintOption) apply(cfg config) config {
|
||||
cfg.PrettyPrint = bool(o)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithoutTimestamps sets the export stream to not include timestamps.
|
||||
func WithoutTimestamps() Option {
|
||||
return timestampsOption(false)
|
||||
}
|
||||
|
||||
type timestampsOption bool
|
||||
|
||||
func (o timestampsOption) apply(cfg config) config {
|
||||
cfg.Timestamps = bool(o)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WithAttributeEncoder sets the attribute encoder used in export.
|
||||
func WithAttributeEncoder(enc attribute.Encoder) Option {
|
||||
return attrEncoderOption{enc}
|
||||
}
|
||||
|
||||
type attrEncoderOption struct {
|
||||
encoder attribute.Encoder
|
||||
}
|
||||
|
||||
func (o attrEncoderOption) apply(cfg config) config {
|
||||
cfg.Encoder = o.encoder
|
||||
return cfg
|
||||
// WithEncoder sets the exporter to use encoder to encode all the metric
|
||||
// data-types to an output.
|
||||
func WithEncoder(encoder Encoder) Option {
|
||||
return optionFunc(func(c config) config {
|
||||
if encoder != nil {
|
||||
c.encoder = &encoderHolder{encoder: encoder}
|
||||
}
|
||||
return c
|
||||
})
|
||||
}
|
||||
|
||||
@@ -12,10 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package stdoutmetric contains an OpenTelemetry exporter for metric
|
||||
// telemetry to be written to an output destination as JSON.
|
||||
// Package stdoutmetric provides an exporter for OpenTelemetry metric
|
||||
// telemetry.
|
||||
//
|
||||
// This package is currently in a pre-GA phase. Backwards incompatible changes
|
||||
// may be introduced in subsequent minor version releases as we work to track
|
||||
// the evolving OpenTelemetry specification and user feedback.
|
||||
// The exporter is intended to be used for testing and debugging, it is not
|
||||
// meant for production use. Additionally, it does not provide an interchange
|
||||
// format for OpenTelemetry that is supported with any stability or
|
||||
// compatibility guarantees. If these are needed features, please use the OTLP
|
||||
// exporter instead.
|
||||
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
43
exporters/stdout/stdoutmetric/encoder.go
Normal file
43
exporters/stdout/stdoutmetric/encoder.go
Normal file
@@ -0,0 +1,43 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
import "errors"
|
||||
|
||||
// Encoder encodes and outputs OpenTelemetry metric data-types as human
|
||||
// readable text.
|
||||
type Encoder interface {
|
||||
// Encode handles the encoding and writing of OpenTelemetry metric data.
|
||||
Encode(v any) error
|
||||
}
|
||||
|
||||
// encoderHolder is the concrete type used to wrap an Encoder so it can be
|
||||
// used as a atomic.Value type.
|
||||
type encoderHolder struct {
|
||||
encoder Encoder
|
||||
}
|
||||
|
||||
func (e encoderHolder) Encode(v any) error { return e.encoder.Encode(v) }
|
||||
|
||||
// shutdownEncoder is used when the exporter is shutdown. It always returns
|
||||
// errShutdown when Encode is called.
|
||||
type shutdownEncoder struct{}
|
||||
|
||||
var errShutdown = errors.New("exporter shutdown")
|
||||
|
||||
func (shutdownEncoder) Encode(any) error { return errShutdown }
|
||||
@@ -12,100 +12,226 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package stdoutmetric_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/global"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncint64"
|
||||
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
|
||||
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
|
||||
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
|
||||
)
|
||||
|
||||
const (
|
||||
instrumentationName = "github.com/instrumentron"
|
||||
instrumentationVersion = "v0.1.0"
|
||||
"go.opentelemetry.io/otel/metric/unit"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
|
||||
)
|
||||
|
||||
var (
|
||||
loopCounter syncint64.Counter
|
||||
paramValue syncint64.Histogram
|
||||
// Sat Jan 01 2000 00:00:00 GMT+0000.
|
||||
now = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
|
||||
|
||||
nameKey = attribute.Key("function.name")
|
||||
res = resource.NewSchemaless(
|
||||
semconv.ServiceNameKey.String("stdoutmetric-example"),
|
||||
)
|
||||
|
||||
mockData = metricdata.ResourceMetrics{
|
||||
Resource: res,
|
||||
ScopeMetrics: []metricdata.ScopeMetrics{
|
||||
{
|
||||
Scope: instrumentation.Scope{Name: "example", Version: "v0.0.1"},
|
||||
Metrics: []metricdata.Metrics{
|
||||
{
|
||||
Name: "requests",
|
||||
Description: "Number of requests received",
|
||||
Unit: unit.Dimensionless,
|
||||
Data: metricdata.Sum[int64]{
|
||||
IsMonotonic: true,
|
||||
Temporality: metricdata.DeltaTemporality,
|
||||
DataPoints: []metricdata.DataPoint[int64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.String("server", "central")),
|
||||
StartTime: now,
|
||||
Time: now.Add(1 * time.Second),
|
||||
Value: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "latency",
|
||||
Description: "Time spend processing received requests",
|
||||
Unit: unit.Milliseconds,
|
||||
Data: metricdata.Histogram{
|
||||
Temporality: metricdata.DeltaTemporality,
|
||||
DataPoints: []metricdata.HistogramDataPoint{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.String("server", "central")),
|
||||
StartTime: now,
|
||||
Time: now.Add(1 * time.Second),
|
||||
Count: 10,
|
||||
Bounds: []float64{1, 5, 10},
|
||||
BucketCounts: []uint64{1, 3, 6, 0},
|
||||
Sum: 57,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "temperature",
|
||||
Description: "CPU global temperature",
|
||||
Unit: unit.Unit("cel(1 K)"),
|
||||
Data: metricdata.Gauge[float64]{
|
||||
DataPoints: []metricdata.DataPoint[float64]{
|
||||
{
|
||||
Attributes: attribute.NewSet(attribute.String("server", "central")),
|
||||
Time: now.Add(1 * time.Second),
|
||||
Value: 32.4,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func add(ctx context.Context, x, y int64) int64 {
|
||||
nameKV := nameKey.String("add")
|
||||
|
||||
loopCounter.Add(ctx, 1, nameKV)
|
||||
paramValue.Record(ctx, x, nameKV)
|
||||
paramValue.Record(ctx, y, nameKV)
|
||||
|
||||
return x + y
|
||||
}
|
||||
|
||||
func multiply(ctx context.Context, x, y int64) int64 {
|
||||
nameKV := nameKey.String("multiply")
|
||||
|
||||
loopCounter.Add(ctx, 1, nameKV)
|
||||
paramValue.Record(ctx, x, nameKV)
|
||||
paramValue.Record(ctx, y, nameKV)
|
||||
|
||||
return x * y
|
||||
}
|
||||
|
||||
func InstallExportPipeline(ctx context.Context) (func(context.Context) error, error) {
|
||||
exporter, err := stdoutmetric.New(stdoutmetric.WithPrettyPrint())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating stdoutmetric exporter: %w", err)
|
||||
}
|
||||
|
||||
pusher := controller.New(
|
||||
processor.NewFactory(
|
||||
simple.NewWithInexpensiveDistribution(),
|
||||
exporter,
|
||||
),
|
||||
controller.WithExporter(exporter),
|
||||
)
|
||||
if err = pusher.Start(ctx); err != nil {
|
||||
log.Fatalf("starting push controller: %v", err)
|
||||
}
|
||||
|
||||
global.SetMeterProvider(pusher)
|
||||
meter := global.Meter(instrumentationName, metric.WithInstrumentationVersion(instrumentationVersion))
|
||||
|
||||
loopCounter, err = meter.SyncInt64().Counter("function.loops")
|
||||
if err != nil {
|
||||
log.Fatalf("creating instrument: %v", err)
|
||||
}
|
||||
paramValue, err = meter.SyncInt64().Histogram("function.param")
|
||||
if err != nil {
|
||||
log.Fatalf("creating instrument: %v", err)
|
||||
}
|
||||
|
||||
return pusher.Stop, nil
|
||||
}
|
||||
|
||||
func Example() {
|
||||
ctx := context.Background()
|
||||
|
||||
// TODO: Registers a meter Provider globally.
|
||||
shutdown, err := InstallExportPipeline(ctx)
|
||||
// Print with a JSON encoder that indents with two spaces.
|
||||
enc := json.NewEncoder(os.Stdout)
|
||||
enc.SetIndent("", " ")
|
||||
exp, err := stdoutmetric.New(stdoutmetric.WithEncoder(enc))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
panic(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := shutdown(ctx); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Println("the answer is", add(ctx, multiply(ctx, multiply(ctx, 2, 2), 10), 2))
|
||||
// Register the exporter with an SDK via a periodic reader.
|
||||
sdk := metric.NewMeterProvider(
|
||||
metric.WithResource(res),
|
||||
metric.WithReader(metric.NewPeriodicReader(exp)),
|
||||
)
|
||||
|
||||
ctx := context.Background()
|
||||
// This is where the sdk would be used to create a Meter and from that
|
||||
// instruments that would make measurments of your code. To simulate that
|
||||
// behavior, call export directly with mocked data.
|
||||
_ = exp.Export(ctx, mockData)
|
||||
|
||||
// Ensure the periodic reader is cleaned up by shutting down the sdk.
|
||||
_ = sdk.Shutdown(ctx)
|
||||
|
||||
// Output:
|
||||
// {
|
||||
// "Resource": [
|
||||
// {
|
||||
// "Key": "service.name",
|
||||
// "Value": {
|
||||
// "Type": "STRING",
|
||||
// "Value": "stdoutmetric-example"
|
||||
// }
|
||||
// }
|
||||
// ],
|
||||
// "ScopeMetrics": [
|
||||
// {
|
||||
// "Scope": {
|
||||
// "Name": "example",
|
||||
// "Version": "v0.0.1",
|
||||
// "SchemaURL": ""
|
||||
// },
|
||||
// "Metrics": [
|
||||
// {
|
||||
// "Name": "requests",
|
||||
// "Description": "Number of requests received",
|
||||
// "Unit": "1",
|
||||
// "Data": {
|
||||
// "DataPoints": [
|
||||
// {
|
||||
// "Attributes": [
|
||||
// {
|
||||
// "Key": "server",
|
||||
// "Value": {
|
||||
// "Type": "STRING",
|
||||
// "Value": "central"
|
||||
// }
|
||||
// }
|
||||
// ],
|
||||
// "StartTime": "2000-01-01T00:00:00Z",
|
||||
// "Time": "2000-01-01T00:00:01Z",
|
||||
// "Value": 5
|
||||
// }
|
||||
// ],
|
||||
// "Temporality": "DeltaTemporality",
|
||||
// "IsMonotonic": true
|
||||
// }
|
||||
// },
|
||||
// {
|
||||
// "Name": "latency",
|
||||
// "Description": "Time spend processing received requests",
|
||||
// "Unit": "ms",
|
||||
// "Data": {
|
||||
// "DataPoints": [
|
||||
// {
|
||||
// "Attributes": [
|
||||
// {
|
||||
// "Key": "server",
|
||||
// "Value": {
|
||||
// "Type": "STRING",
|
||||
// "Value": "central"
|
||||
// }
|
||||
// }
|
||||
// ],
|
||||
// "StartTime": "2000-01-01T00:00:00Z",
|
||||
// "Time": "2000-01-01T00:00:01Z",
|
||||
// "Count": 10,
|
||||
// "Bounds": [
|
||||
// 1,
|
||||
// 5,
|
||||
// 10
|
||||
// ],
|
||||
// "BucketCounts": [
|
||||
// 1,
|
||||
// 3,
|
||||
// 6,
|
||||
// 0
|
||||
// ],
|
||||
// "Sum": 57
|
||||
// }
|
||||
// ],
|
||||
// "Temporality": "DeltaTemporality"
|
||||
// }
|
||||
// },
|
||||
// {
|
||||
// "Name": "temperature",
|
||||
// "Description": "CPU global temperature",
|
||||
// "Unit": "cel(1 K)",
|
||||
// "Data": {
|
||||
// "DataPoints": [
|
||||
// {
|
||||
// "Attributes": [
|
||||
// {
|
||||
// "Key": "server",
|
||||
// "Value": {
|
||||
// "Type": "STRING",
|
||||
// "Value": "central"
|
||||
// }
|
||||
// }
|
||||
// ],
|
||||
// "StartTime": "0001-01-01T00:00:00Z",
|
||||
// "Time": "2000-01-01T00:00:01Z",
|
||||
// "Value": 32.4
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
}
|
||||
|
||||
@@ -12,27 +12,60 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
import "go.opentelemetry.io/otel/sdk/metric/export"
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
// Exporter is an OpenTelemetry metric exporter that transmits telemetry to
|
||||
// the local STDOUT.
|
||||
type Exporter struct {
|
||||
metricExporter
|
||||
}
|
||||
|
||||
var (
|
||||
_ export.Exporter = &Exporter{}
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// New creates an Exporter with the passed options.
|
||||
func New(options ...Option) (*Exporter, error) {
|
||||
cfg, err := newConfig(options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Exporter{
|
||||
metricExporter: metricExporter{cfg},
|
||||
}, nil
|
||||
// exporter is an OpenTelemetry metric exporter.
|
||||
type exporter struct {
|
||||
encVal atomic.Value // encoderHolder
|
||||
|
||||
shutdownOnce sync.Once
|
||||
}
|
||||
|
||||
// New returns a configured metric exporter.
|
||||
//
|
||||
// If no options are passed, the default exporter returned will use a JSON
|
||||
// encoder with tab indentations that output to STDOUT.
|
||||
func New(options ...Option) (metric.Exporter, error) {
|
||||
cfg := newConfig(options...)
|
||||
exp := &exporter{}
|
||||
exp.encVal.Store(*cfg.encoder)
|
||||
return exp, nil
|
||||
}
|
||||
|
||||
func (e *exporter) Export(ctx context.Context, data metricdata.ResourceMetrics) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Don't do anything if the context has already timed out.
|
||||
return ctx.Err()
|
||||
default:
|
||||
// Context is still valid, continue.
|
||||
}
|
||||
|
||||
return e.encVal.Load().(encoderHolder).Encode(data)
|
||||
}
|
||||
|
||||
func (e *exporter) ForceFlush(ctx context.Context) error {
|
||||
// exporter holds no state, nothing to flush.
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func (e *exporter) Shutdown(ctx context.Context) error {
|
||||
e.shutdownOnce.Do(func() {
|
||||
e.encVal.Store(encoderHolder{
|
||||
encoder: shutdownEncoder{},
|
||||
})
|
||||
})
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
102
exporters/stdout/stdoutmetric/exporter_test.go
Normal file
102
exporters/stdout/stdoutmetric/exporter_test.go
Normal file
@@ -0,0 +1,102 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build go1.18
|
||||
// +build go1.18
|
||||
|
||||
package stdoutmetric_test // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
func testEncoderOption() stdoutmetric.Option {
|
||||
// Discard export output for testing.
|
||||
enc := json.NewEncoder(io.Discard)
|
||||
return stdoutmetric.WithEncoder(enc)
|
||||
}
|
||||
|
||||
func testCtxErrHonored(factory func(*testing.T) func(context.Context) error) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
t.Cleanup(cancel)
|
||||
|
||||
t.Run("DeadlineExceeded", func(t *testing.T) {
|
||||
innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond)
|
||||
t.Cleanup(innerCancel)
|
||||
<-innerCtx.Done()
|
||||
|
||||
f := factory(t)
|
||||
assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded)
|
||||
})
|
||||
|
||||
t.Run("Canceled", func(t *testing.T) {
|
||||
innerCtx, innerCancel := context.WithCancel(ctx)
|
||||
innerCancel()
|
||||
|
||||
f := factory(t)
|
||||
assert.ErrorIs(t, f(innerCtx), context.Canceled)
|
||||
})
|
||||
|
||||
t.Run("NoError", func(t *testing.T) {
|
||||
f := factory(t)
|
||||
assert.NoError(t, f(ctx))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestExporterHonorsContextErrors(t *testing.T) {
|
||||
t.Run("Shutdown", testCtxErrHonored(func(t *testing.T) func(context.Context) error {
|
||||
exp, err := stdoutmetric.New(testEncoderOption())
|
||||
require.NoError(t, err)
|
||||
return exp.Shutdown
|
||||
}))
|
||||
|
||||
t.Run("ForceFlush", testCtxErrHonored(func(t *testing.T) func(context.Context) error {
|
||||
exp, err := stdoutmetric.New(testEncoderOption())
|
||||
require.NoError(t, err)
|
||||
return exp.ForceFlush
|
||||
}))
|
||||
|
||||
t.Run("Export", testCtxErrHonored(func(t *testing.T) func(context.Context) error {
|
||||
exp, err := stdoutmetric.New(testEncoderOption())
|
||||
require.NoError(t, err)
|
||||
return func(ctx context.Context) error {
|
||||
var data metricdata.ResourceMetrics
|
||||
return exp.Export(ctx, data)
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
func TestShutdownExporterReturnsShutdownErrorOnExport(t *testing.T) {
|
||||
var (
|
||||
data metricdata.ResourceMetrics
|
||||
ctx = context.Background()
|
||||
exp, err = stdoutmetric.New(testEncoderOption())
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, exp.Shutdown(ctx))
|
||||
assert.EqualError(t, exp.Export(ctx, data), "exporter shutdown")
|
||||
}
|
||||
@@ -1,18 +1,13 @@
|
||||
module go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
|
||||
|
||||
go 1.17
|
||||
|
||||
replace (
|
||||
go.opentelemetry.io/otel => ../../..
|
||||
go.opentelemetry.io/otel/sdk => ../../../sdk
|
||||
)
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.7.1
|
||||
go.opentelemetry.io/otel v1.10.0
|
||||
go.opentelemetry.io/otel/metric v0.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.10.0
|
||||
go.opentelemetry.io/otel/sdk/metric v0.31.0
|
||||
go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000
|
||||
go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -27,6 +22,10 @@ require (
|
||||
|
||||
replace go.opentelemetry.io/otel/metric => ../../../metric
|
||||
|
||||
replace go.opentelemetry.io/otel => ../../..
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
|
||||
|
||||
replace go.opentelemetry.io/otel/trace => ../../../trace
|
||||
|
||||
replace go.opentelemetry.io/otel/sdk => ../../../sdk
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
|
||||
@@ -1,144 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export"
|
||||
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/sdkapi"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// metricExporter exports metric data as JSON lines using the writer and
// encoder held in its config.
type metricExporter struct {
	config config
}

// Compile-time check that metricExporter implements export.Exporter.
var _ export.Exporter = &metricExporter{}
|
||||
|
||||
// line is the JSON-serializable form of a single exported metric record.
// Fields left nil are omitted from the encoded output; which value fields
// are set depends on the record's aggregation kind.
type line struct {
	Name      string      `json:"Name"`
	Sum       interface{} `json:"Sum,omitempty"`
	Count     interface{} `json:"Count,omitempty"`
	LastValue interface{} `json:"Last,omitempty"`

	// Note: this is a pointer because omitempty doesn't work when time.IsZero()
	Timestamp *time.Time `json:"Timestamp,omitempty"`
}
|
||||
|
||||
func (e *metricExporter) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
|
||||
return aggregation.StatelessTemporalitySelector().TemporalityFor(desc, kind)
|
||||
}
|
||||
|
||||
func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reader export.InstrumentationLibraryReader) error {
|
||||
var aggError error
|
||||
var batch []line
|
||||
aggError = reader.ForEach(func(lib instrumentation.Library, mr export.Reader) error {
|
||||
var instAttrs []attribute.KeyValue
|
||||
if name := lib.Name; name != "" {
|
||||
instAttrs = append(instAttrs, attribute.String("instrumentation.name", name))
|
||||
if version := lib.Version; version != "" {
|
||||
instAttrs = append(instAttrs, attribute.String("instrumentation.version", version))
|
||||
}
|
||||
if schema := lib.SchemaURL; schema != "" {
|
||||
instAttrs = append(instAttrs, attribute.String("instrumentation.schema_url", schema))
|
||||
}
|
||||
}
|
||||
instSet := attribute.NewSet(instAttrs...)
|
||||
encodedInstAttrs := instSet.Encoded(e.config.Encoder)
|
||||
|
||||
return mr.ForEach(e, func(record export.Record) error {
|
||||
desc := record.Descriptor()
|
||||
agg := record.Aggregation()
|
||||
kind := desc.NumberKind()
|
||||
encodedResource := res.Encoded(e.config.Encoder)
|
||||
|
||||
var expose line
|
||||
|
||||
if sum, ok := agg.(aggregation.Sum); ok {
|
||||
value, err := sum.Sum()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.Sum = value.AsInterface(kind)
|
||||
} else if lv, ok := agg.(aggregation.LastValue); ok {
|
||||
value, timestamp, err := lv.LastValue()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
expose.LastValue = value.AsInterface(kind)
|
||||
|
||||
if e.config.Timestamps {
|
||||
expose.Timestamp = ×tamp
|
||||
}
|
||||
}
|
||||
|
||||
var encodedAttrs string
|
||||
iter := record.Attributes().Iter()
|
||||
if iter.Len() > 0 {
|
||||
encodedAttrs = record.Attributes().Encoded(e.config.Encoder)
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
|
||||
_, _ = sb.WriteString(desc.Name())
|
||||
|
||||
if len(encodedAttrs) > 0 || len(encodedResource) > 0 || len(encodedInstAttrs) > 0 {
|
||||
_, _ = sb.WriteRune('{')
|
||||
_, _ = sb.WriteString(encodedResource)
|
||||
if len(encodedInstAttrs) > 0 && len(encodedResource) > 0 {
|
||||
_, _ = sb.WriteRune(',')
|
||||
}
|
||||
_, _ = sb.WriteString(encodedInstAttrs)
|
||||
if len(encodedAttrs) > 0 && (len(encodedInstAttrs) > 0 || len(encodedResource) > 0) {
|
||||
_, _ = sb.WriteRune(',')
|
||||
}
|
||||
_, _ = sb.WriteString(encodedAttrs)
|
||||
_, _ = sb.WriteRune('}')
|
||||
}
|
||||
|
||||
expose.Name = sb.String()
|
||||
|
||||
batch = append(batch, expose)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if len(batch) == 0 {
|
||||
return aggError
|
||||
}
|
||||
|
||||
data, err := e.marshal(batch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(e.config.Writer, string(data))
|
||||
|
||||
return aggError
|
||||
}
|
||||
|
||||
// marshal v with appropriate indentation.
|
||||
func (e *metricExporter) marshal(v interface{}) ([]byte, error) {
|
||||
if e.config.PrettyPrint {
|
||||
return json.MarshalIndent(v, "", "\t")
|
||||
}
|
||||
return json.Marshal(v)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user