deps: update buildkit, vendor changes
Signed-off-by: Laura Brehm <laurabrehm@hey.com>
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/LICENSE (generated, vendored, new file, 201 lines added)
@@ -0,0 +1,201 @@
[Standard Apache License, Version 2.0 text (January 2004, http://www.apache.org/licenses/), including the appendix boilerplate notice — 201 lines added.]
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/doc.go (generated, vendored, new file, 20 lines added)
@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package otlpmetric provides an OpenTelemetry metric Exporter that can be
// used with PeriodicReader. It transforms metricdata into OTLP and transmits
// the transformed data to OTLP receivers. The Exporter is configurable to use
// different Clients, each using a distinct transport protocol to communicate
// to an OTLP receiving endpoint.
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
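The package comment above describes the Exporter/PeriodicReader split but the diff itself contains no usage code. As a minimal, hedged wiring sketch (not part of this commit), assuming the concrete gRPC client added later in this diff is used as the transport:

package main

import (
    "context"
    "log"
    "time"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
    ctx := context.Background()

    // otlpmetricgrpc.New builds an otlpmetric-style Exporter backed by the
    // gRPC Client described in this vendored package.
    exp, err := otlpmetricgrpc.New(ctx)
    if err != nil {
        log.Fatal(err)
    }

    // A PeriodicReader drives the Exporter: it collects metricdata on an
    // interval and hands it to Export for transformation into OTLP.
    reader := sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(30*time.Second))
    provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
    defer func() { _ = provider.Shutdown(ctx) }()
}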
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/LICENSE (generated, vendored, new file, 201 lines added)
@@ -0,0 +1,201 @@
[Same standard Apache License, Version 2.0 text as the LICENSE file above — 201 lines added.]
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go (generated, vendored, new file, 199 lines added)
@@ -0,0 +1,199 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"

import (
    "context"
    "time"

    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
    colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
    metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

type client struct {
    metadata      metadata.MD
    exportTimeout time.Duration
    requestFunc   retry.RequestFunc

    // ourConn keeps track of where conn was created: true if created here in
    // NewClient, or false if passed with an option. This is important on
    // Shutdown as the conn should only be closed if we created it. Otherwise,
    // it is up to the processes that passed the conn to close it.
    ourConn bool
    conn    *grpc.ClientConn
    msc     colmetricpb.MetricsServiceClient
}

// newClient creates a new gRPC metric client.
func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
    c := &client{
        exportTimeout: cfg.Metrics.Timeout,
        requestFunc:   cfg.RetryConfig.RequestFunc(retryable),
        conn:          cfg.GRPCConn,
    }

    if len(cfg.Metrics.Headers) > 0 {
        c.metadata = metadata.New(cfg.Metrics.Headers)
    }

    if c.conn == nil {
        // If the caller did not provide a ClientConn when the client was
        // created, create one using the configuration they did provide.
        conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
        if err != nil {
            return nil, err
        }
        // Keep track that we own the lifecycle of this conn and need to close
        // it on Shutdown.
        c.ourConn = true
        c.conn = conn
    }

    c.msc = colmetricpb.NewMetricsServiceClient(c.conn)

    return c, nil
}

// Shutdown shuts down the client, freeing all resource.
//
// Any active connections to a remote endpoint are closed if they were created
// by the client. Any gRPC connection passed during creation using
// WithGRPCConn will not be closed. It is the caller's responsibility to
// handle cleanup of that resource.
func (c *client) Shutdown(ctx context.Context) error {
    // The otlpmetric.Exporter synchronizes access to client methods and
    // ensures this is called only once. The only thing that needs to be done
    // here is to release any computational resources the client holds.

    c.metadata = nil
    c.requestFunc = nil
    c.msc = nil

    err := ctx.Err()
    if c.ourConn {
        closeErr := c.conn.Close()
        // A context timeout error takes precedence over this error.
        if err == nil && closeErr != nil {
            err = closeErr
        }
    }
    c.conn = nil
    return err
}

// UploadMetrics sends protoMetrics to connected endpoint.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
    // The otlpmetric.Exporter synchronizes access to client methods, and
    // ensures this is not called after the Exporter is shutdown. Only thing
    // to do here is send data.

    select {
    case <-ctx.Done():
        // Do not upload if the context is already expired.
        return ctx.Err()
    default:
    }

    ctx, cancel := c.exportContext(ctx)
    defer cancel()

    return c.requestFunc(ctx, func(iCtx context.Context) error {
        resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{
            ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
        })
        if resp != nil && resp.PartialSuccess != nil {
            msg := resp.PartialSuccess.GetErrorMessage()
            n := resp.PartialSuccess.GetRejectedDataPoints()
            if n != 0 || msg != "" {
                err := internal.MetricPartialSuccessError(n, msg)
                otel.Handle(err)
            }
        }
        // nil is converted to OK.
        if status.Code(err) == codes.OK {
            // Success.
            return nil
        }
        return err
    })
}

// exportContext returns a copy of parent with an appropriate deadline and
// cancellation function based on the clients configured export timeout.
//
// It is the callers responsibility to cancel the returned context once its
// use is complete, via the parent or directly with the returned CancelFunc, to
// ensure all resources are correctly released.
func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
    var (
        ctx    context.Context
        cancel context.CancelFunc
    )

    if c.exportTimeout > 0 {
        ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
    } else {
        ctx, cancel = context.WithCancel(parent)
    }

    if c.metadata.Len() > 0 {
        ctx = metadata.NewOutgoingContext(ctx, c.metadata)
    }

    return ctx, cancel
}

// retryable returns if err identifies a request that can be retried and a
// duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) {
    s := status.Convert(err)
    switch s.Code() {
    case codes.Canceled,
        codes.DeadlineExceeded,
        codes.ResourceExhausted,
        codes.Aborted,
        codes.OutOfRange,
        codes.Unavailable,
        codes.DataLoss:
        return true, throttleDelay(s)
    }

    // Not a retry-able error.
    return false, 0
}

// throttleDelay returns a duration to wait for if an explicit throttle time
// is included in the response status.
func throttleDelay(s *status.Status) time.Duration {
    for _, detail := range s.Details() {
        if t, ok := detail.(*errdetails.RetryInfo); ok {
            return t.RetryDelay.AsDuration()
        }
    }
    return 0
}
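The retryable/throttleDelay pair above classifies gRPC status codes and honors an explicit server backoff hint. A small standalone sketch (not part of this diff; throttleDelayOf is a hypothetical mirror of the unexported helper, since the vendored function cannot be imported) showing how a RetryInfo detail carries that delay:

package main

import (
    "fmt"
    "time"

    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/types/known/durationpb"
)

// throttleDelayOf mirrors throttleDelay above: scan the status details for a
// RetryInfo message and return its delay, or 0 if none is present.
func throttleDelayOf(s *status.Status) time.Duration {
    for _, detail := range s.Details() {
        if t, ok := detail.(*errdetails.RetryInfo); ok {
            return t.RetryDelay.AsDuration()
        }
    }
    return 0
}

func main() {
    // Build a retryable status carrying an explicit throttle hint from the server.
    s, err := status.New(codes.ResourceExhausted, "slow down").WithDetails(
        &errdetails.RetryInfo{RetryDelay: durationpb.New(2 * time.Second)},
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(throttleDelayOf(s)) // prints 2s
}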
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go (generated, vendored, new file, 256 lines added)
@@ -0,0 +1,256 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"

import (
    "fmt"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
    "go.opentelemetry.io/otel/sdk/metric"
)

// Option applies a configuration option to the Exporter.
type Option interface {
    applyGRPCOption(oconf.Config) oconf.Config
}

func asGRPCOptions(opts []Option) []oconf.GRPCOption {
    converted := make([]oconf.GRPCOption, len(opts))
    for i, o := range opts {
        converted[i] = oconf.NewGRPCOption(o.applyGRPCOption)
    }
    return converted
}

// RetryConfig defines configuration for retrying the export of metric data
// that failed.
//
// This configuration does not define any network retry strategy. That is
// entirely handled by the gRPC ClientConn.
type RetryConfig retry.Config

type wrappedOption struct {
    oconf.GRPCOption
}

func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config {
    return w.ApplyGRPCOption(cfg)
}

// WithInsecure disables client transport security for the Exporter's gRPC
// connection, just like grpc.WithInsecure()
// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used to determine client security. If the endpoint has a
// scheme of "http" or "unix" client security will be disabled. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, client security will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithInsecure() Option {
    return wrappedOption{oconf.WithInsecure()}
}

// WithEndpoint sets the target endpoint the Exporter will connect to.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, "localhost:4317" will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithEndpoint(endpoint string) Option {
    return wrappedOption{oconf.WithEndpoint(endpoint)}
}

// WithReconnectionPeriod set the minimum amount of time between connection
// attempts to the target endpoint.
//
// This option has no effect if WithGRPCConn is used.
func WithReconnectionPeriod(rp time.Duration) Option {
    return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
        cfg.ReconnectionPeriod = rp
        return cfg
    })}
}

func compressorToCompression(compressor string) oconf.Compression {
    if compressor == "gzip" {
        return oconf.GzipCompression
    }

    otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
    return oconf.NoCompression
}

// WithCompressor sets the compressor the gRPC client uses.
//
// It is the responsibility of the caller to ensure that the compressor set
// has been registered with google.golang.org/grpc/encoding (see
// encoding.RegisterCompressor for more information). For example, to register
// the gzip compressor import the package:
//
//     import _ "google.golang.org/grpc/encoding/gzip"
//
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
// this option is not passed, that variable value will be used. That value can
// be either "none" or "gzip". If both are set,
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no compressor will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithCompressor(compressor string) Option {
    return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
}

// WithHeaders will send the provided headers with each gRPC requests.
//
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as a list of key value pairs.
// These pairs are expected to be in the W3C Correlation-Context format
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no user headers will be set.
func WithHeaders(headers map[string]string) Option {
    return wrappedOption{oconf.WithHeaders(headers)}
}

// WithTLSCredentials sets the gRPC connection to use creds.
//
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
// this option is not passed, that variable value will be used. The value will
// be parsed the filepath of the TLS certificate chain to use. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no TLS credentials will be used.
//
// This option has no effect if WithGRPCConn is used.
func WithTLSCredentials(creds credentials.TransportCredentials) Option {
    return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
        cfg.Metrics.GRPCCredentials = creds
        return cfg
    })}
}

// WithServiceConfig defines the default gRPC service config used.
//
// This option has no effect if WithGRPCConn is used.
func WithServiceConfig(serviceConfig string) Option {
    return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
        cfg.ServiceConfig = serviceConfig
        return cfg
    })}
}

// WithDialOption sets explicit grpc.DialOptions to use when establishing a
// gRPC connection. The options here are appended to the internal grpc.DialOptions
// used so they will take precedence over any other internal grpc.DialOptions
// they might conflict with.
//
// This option has no effect if WithGRPCConn is used.
func WithDialOption(opts ...grpc.DialOption) Option {
    return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
        cfg.DialOptions = opts
        return cfg
    })}
}

// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
//
// This option takes precedence over any other option that relates to
// establishing or persisting a gRPC connection to a target endpoint. Any
// other option of those types passed will be ignored.
//
// It is the callers responsibility to close the passed conn. The Exporter
// Shutdown method will not close this connection.
func WithGRPCConn(conn *grpc.ClientConn) Option {
    return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
        cfg.GRPCConn = conn
        return cfg
    })}
}

// WithTimeout sets the max amount of time an Exporter will attempt an export.
//
// This takes precedence over any retry settings defined by WithRetry. Once
// this time limit has been reached the export is abandoned and the metric
// data is dropped.
//
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as an integer representing the
// timeout in milliseconds. If both are set,
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, a timeout of 10 seconds will be used.
func WithTimeout(duration time.Duration) Option {
    return wrappedOption{oconf.WithTimeout(duration)}
}

// WithRetry sets the retry policy for transient retryable errors that are
// returned by the target endpoint.
//
// If the target endpoint responds with not only a retryable error, but
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// These settings do not define any network retry strategy. That is entirely
// handled by the gRPC ClientConn.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
// after each error for no more than a total time of 1 minute.
func WithRetry(settings RetryConfig) Option {
    return wrappedOption{oconf.WithRetry(retry.Config(settings))}
}

// WithTemporalitySelector sets the TemporalitySelector the client will use to
// determine the Temporality of an instrument based on its kind. If this option
// is not used, the client will use the DefaultTemporalitySelector from the
// go.opentelemetry.io/otel/sdk/metric package.
func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
    return wrappedOption{oconf.WithTemporalitySelector(selector)}
}

// WithAggregationSelector sets the AggregationSelector the client will use to
// determine the aggregation to use for an instrument based on its kind. If
// this option is not used, the reader will use the DefaultAggregationSelector
// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
// explicitly passed for a view matching an instrument.
func WithAggregationSelector(selector metric.AggregationSelector) Option {
    return wrappedOption{oconf.WithAggregationSelector(selector)}
}
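The option functions above, together with their documented environment-variable fallbacks, are only defined in this file, not exercised. A short, hedged usage sketch (not part of this commit; the endpoint and header values are hypothetical) combining a few of them:

package main

import (
    "context"
    "log"
    "time"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
)

func main() {
    ctx := context.Background()

    // Explicit options; per the doc comments above, the corresponding
    // OTEL_EXPORTER_OTLP_* / OTEL_EXPORTER_OTLP_METRICS_* variables are only
    // consulted when an option is not passed.
    exp, err := otlpmetricgrpc.New(ctx,
        otlpmetricgrpc.WithEndpoint("collector.example.internal:4317"), // hypothetical endpoint
        otlpmetricgrpc.WithInsecure(),
        otlpmetricgrpc.WithHeaders(map[string]string{"x-team": "build"}), // hypothetical header
        otlpmetricgrpc.WithTimeout(10*time.Second),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = exp.Shutdown(ctx) }()
}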
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go (generated, vendored, new file, 17 lines added)
@@ -0,0 +1,17 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates
// with an OTLP receiving endpoint using gRPC.
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go (generated, vendored, new file, 167 lines added)
@@ -0,0 +1,167 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"

import (
    "context"
    "fmt"
    "sync"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"
    "go.opentelemetry.io/otel/internal/global"
    "go.opentelemetry.io/otel/sdk/metric"
    "go.opentelemetry.io/otel/sdk/metric/metricdata"
    metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// Exporter is a OpenTelemetry metric Exporter using gRPC.
type Exporter struct {
    // Ensure synchronous access to the client across all functionality.
    clientMu sync.Mutex
    client   interface {
        UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
        Shutdown(context.Context) error
    }

    temporalitySelector metric.TemporalitySelector
    aggregationSelector metric.AggregationSelector

    shutdownOnce sync.Once
}

func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
    ts := cfg.Metrics.TemporalitySelector
    if ts == nil {
        ts = func(metric.InstrumentKind) metricdata.Temporality {
            return metricdata.CumulativeTemporality
        }
    }

    as := cfg.Metrics.AggregationSelector
    if as == nil {
        as = metric.DefaultAggregationSelector
    }

    return &Exporter{
        client: c,

        temporalitySelector: ts,
        aggregationSelector: as,
    }, nil
}

// Temporality returns the Temporality to use for an instrument kind.
func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
    return e.temporalitySelector(k)
}

// Aggregation returns the Aggregation to use for an instrument kind.
func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
    return e.aggregationSelector(k)
}

// Export transforms and transmits metric data to an OTLP receiver.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
    defer global.Debug("OTLP/gRPC exporter export", "Data", rm)

    otlpRm, err := transform.ResourceMetrics(rm)
    // Best effort upload of transformable metrics.
    e.clientMu.Lock()
    upErr := e.client.UploadMetrics(ctx, otlpRm)
    e.clientMu.Unlock()
    if upErr != nil {
        if err == nil {
            return fmt.Errorf("failed to upload metrics: %w", upErr)
        }
        // Merge the two errors.
        return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
    }
    return err
}

// ForceFlush flushes any metric data held by an exporter.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) ForceFlush(ctx context.Context) error {
    // The exporter and client hold no state, nothing to flush.
    return ctx.Err()
}

// Shutdown flushes all metric data held by an exporter and releases any held
// computational resources.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) Shutdown(ctx context.Context) error {
    err := errShutdown
    e.shutdownOnce.Do(func() {
        e.clientMu.Lock()
        client := e.client
        e.client = shutdownClient{}
        e.clientMu.Unlock()
        err = client.Shutdown(ctx)
    })
    return err
}

var errShutdown = fmt.Errorf("gRPC exporter is shutdown")

type shutdownClient struct{}

func (c shutdownClient) err(ctx context.Context) error {
    if err := ctx.Err(); err != nil {
        return err
    }
    return errShutdown
}

func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
    return c.err(ctx)
}

func (c shutdownClient) Shutdown(ctx context.Context) error {
    return c.err(ctx)
}

// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
    return struct{ Type string }{Type: "OTLP/gRPC"}
}

// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
// endpoint using gRPC.
//
// If an already established gRPC ClientConn is not passed in options using
// WithGRPCConn, a connection to the OTLP endpoint will be established based
// on options. If a connection cannot be establishes in the lifetime of ctx,
// an error will be returned.
func New(ctx context.Context, options ...Option) (*Exporter, error) {
    cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
    c, err := newClient(ctx, cfg)
    if err != nil {
        return nil, err
    }
    return newExporter(c, cfg)
}
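New and the client's Shutdown docs together define conn ownership: a conn passed via WithGRPCConn stays the caller's to close. A hedged sketch of that case (not part of this diff; the target address is hypothetical):

package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    ctx := context.Background()

    // A caller-owned ClientConn to a hypothetical local collector.
    conn, err := grpc.DialContext(ctx, "localhost:4317",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    // Exporter.Shutdown will not close a conn passed via WithGRPCConn, so the
    // caller closes it.
    defer conn.Close()

    exp, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithGRPCConn(conn))
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = exp.Shutdown(ctx) }()
}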
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go (generated, vendored, new file, 202 lines added)
@@ -0,0 +1,202 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
)
|
||||
|
||||
// ConfigFn is the generic function used to set a config.
|
||||
type ConfigFn func(*EnvOptionsReader)
|
||||
|
||||
// EnvOptionsReader reads the required environment variables.
|
||||
type EnvOptionsReader struct {
|
||||
GetEnv func(string) string
|
||||
ReadFile func(string) ([]byte, error)
|
||||
Namespace string
|
||||
}
|
||||
|
||||
// Apply runs every ConfigFn.
|
||||
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
|
||||
for _, o := range opts {
|
||||
o(e)
|
||||
}
|
||||
}
|
||||
|
||||
// GetEnvValue gets an OTLP environment variable value of the specified key
|
||||
// using the GetEnv function.
|
||||
// This function prepends the OTLP specified namespace to all key lookups.
|
||||
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
|
||||
v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
|
||||
return v, v != ""
|
||||
}
|
||||
|
||||
// WithString retrieves the specified config and passes it to ConfigFn as a string.
|
||||
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
fn(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
|
||||
func WithBool(n string, fn func(bool)) ConfigFn {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
b := strings.ToLower(v) == "true"
|
||||
fn(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
|
||||
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
d, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
global.Error(err, "parse duration", "input", v)
|
||||
return
|
||||
}
|
||||
fn(time.Duration(d) * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
|
||||
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
fn(stringToHeader(v))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
|
||||
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
u, err := url.Parse(v)
|
||||
if err != nil {
|
||||
global.Error(err, "parse url", "input", v)
|
||||
return
|
||||
}
|
||||
fn(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
|
||||
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
|
||||
return func(e *EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
b, err := e.ReadFile(v)
|
||||
if err != nil {
|
||||
global.Error(err, "read tls ca cert file", "file", v)
|
||||
return
|
||||
}
|
||||
c, err := createCertPool(b)
|
||||
if err != nil {
|
||||
global.Error(err, "create tls cert pool")
|
||||
return
|
||||
}
|
||||
fn(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If both exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
|
||||
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
|
||||
return func(e *EnvOptionsReader) {
|
||||
vc, okc := e.GetEnvValue(nc)
|
||||
vk, okk := e.GetEnvValue(nk)
|
||||
if !okc || !okk {
|
||||
return
|
||||
}
|
||||
cert, err := e.ReadFile(vc)
|
||||
if err != nil {
|
||||
global.Error(err, "read tls client cert", "file", vc)
|
||||
return
|
||||
}
|
||||
key, err := e.ReadFile(vk)
|
||||
if err != nil {
|
||||
global.Error(err, "read tls client key", "file", vk)
|
||||
return
|
||||
}
|
||||
crt, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
global.Error(err, "create tls client key pair")
|
||||
return
|
||||
}
|
||||
fn(crt)
|
||||
}
|
||||
}
|
||||
|
||||
func keyWithNamespace(ns, key string) string {
|
||||
if ns == "" {
|
||||
return key
|
||||
}
|
||||
return fmt.Sprintf("%s_%s", ns, key)
|
||||
}
|
||||
|
||||
func stringToHeader(value string) map[string]string {
|
||||
headersPairs := strings.Split(value, ",")
|
||||
headers := make(map[string]string)
|
||||
|
||||
for _, header := range headersPairs {
|
||||
n, v, found := strings.Cut(header, "=")
|
||||
if !found {
|
||||
global.Error(errors.New("missing '='"), "parse headers", "input", header)
|
||||
continue
|
||||
}
|
||||
name, err := url.QueryUnescape(n)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header key", "key", n)
|
||||
continue
|
||||
}
|
||||
trimmedName := strings.TrimSpace(name)
|
||||
value, err := url.QueryUnescape(v)
|
||||
if err != nil {
|
||||
global.Error(err, "escape header value", "value", v)
|
||||
continue
|
||||
}
|
||||
trimmedValue := strings.TrimSpace(value)
|
||||
|
||||
headers[trimmedName] = trimmedValue
|
||||
}
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
func createCertPool(certBytes []byte) (*x509.CertPool, error) {
|
||||
cp := x509.NewCertPool()
|
||||
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||
return nil, errors.New("failed to append certificate to the cert pool")
|
||||
}
|
||||
return cp, nil
|
||||
}
|
42
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go
|
||||
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
|
||||
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
|
221
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// DefaultEnvOptionsReader is the default environments reader.
|
||||
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
GetEnv: os.Getenv,
|
||||
ReadFile: os.ReadFile,
|
||||
Namespace: "OTEL_EXPORTER_OTLP",
|
||||
}
|
||||
|
||||
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
|
||||
func ApplyGRPCEnvConfigs(cfg Config) Config {
|
||||
opts := getOptionsFromEnv()
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyGRPCOption(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
|
||||
func ApplyHTTPEnvConfigs(cfg Config) Config {
|
||||
opts := getOptionsFromEnv()
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyHTTPOption(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
func getOptionsFromEnv() []GenericOption {
|
||||
opts := []GenericOption{}
|
||||
|
||||
tlsConf := &tls.Config{}
|
||||
DefaultEnvOptionsReader.Apply(
|
||||
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
|
||||
opts = append(opts, withEndpointScheme(u))
|
||||
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = u.Host
|
||||
// For OTLP/HTTP endpoint URLs without a per-signal
|
||||
// configuration, the passed endpoint is used as a base URL
|
||||
// and the signals are sent to these paths relative to that.
|
||||
cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
|
||||
return cfg
|
||||
}, withEndpointForGRPC(u)))
|
||||
}),
|
||||
envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
|
||||
opts = append(opts, withEndpointScheme(u))
|
||||
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = u.Host
|
||||
// For endpoint URLs for OTLP/HTTP per-signal variables, the
|
||||
// URL MUST be used as-is without any modification. The only
|
||||
// exception is that if an URL contains no path part, the root
|
||||
// path / MUST be used.
|
||||
path := u.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
cfg.Metrics.URLPath = path
|
||||
return cfg
|
||||
}, withEndpointForGRPC(u)))
|
||||
}),
|
||||
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||
envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
|
||||
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
|
||||
)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
|
||||
return func(cfg Config) Config {
|
||||
// For OTLP/gRPC endpoints, this is the target to which the
|
||||
// exporter is going to send telemetry.
|
||||
cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
|
||||
return cfg
|
||||
}
|
||||
}
|
||||
|
||||
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
|
||||
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
cp := NoCompression
|
||||
if v == "gzip" {
|
||||
cp = GzipCompression
|
||||
}
|
||||
|
||||
fn(cp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withEndpointScheme(u *url.URL) GenericOption {
|
||||
switch strings.ToLower(u.Scheme) {
|
||||
case "http", "unix":
|
||||
return WithInsecure()
|
||||
default:
|
||||
return WithSecure()
|
||||
}
|
||||
}
|
||||
|
||||
// revive:disable-next-line:flag-parameter
|
||||
func withInsecure(b bool) GenericOption {
|
||||
if b {
|
||||
return WithInsecure()
|
||||
}
|
||||
return WithSecure()
|
||||
}
|
||||
|
||||
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if c.RootCAs != nil || len(c.Certificates) > 0 {
|
||||
fn(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if s, ok := e.GetEnvValue(n); ok {
|
||||
switch strings.ToLower(s) {
|
||||
case "cumulative":
|
||||
fn(cumulativeTemporality)
|
||||
case "delta":
|
||||
fn(deltaTemporality)
|
||||
case "lowmemory":
|
||||
fn(lowMemory)
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
|
||||
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
|
||||
switch ik {
|
||||
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
|
||||
return metricdata.DeltaTemporality
|
||||
default:
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
}
|
||||
|
||||
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
|
||||
switch ik {
|
||||
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
|
||||
return metricdata.DeltaTemporality
|
||||
default:
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
}
|
||||
|
||||
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if s, ok := e.GetEnvValue(n); ok {
|
||||
switch strings.ToLower(s) {
|
||||
case "explicit_bucket_histogram":
|
||||
fn(metric.DefaultAggregationSelector)
|
||||
case "base2_exponential_bucket_histogram":
|
||||
fn(func(kind metric.InstrumentKind) metric.Aggregation {
|
||||
if kind == metric.InstrumentKindHistogram {
|
||||
return metric.AggregationBase2ExponentialHistogram{
|
||||
MaxSize: 160,
|
||||
MaxScale: 20,
|
||||
NoMinMax: false,
|
||||
}
|
||||
}
|
||||
return metric.DefaultAggregationSelector(kind)
|
||||
})
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
359
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
generated
vendored
Normal file
@@ -0,0 +1,359 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultMaxAttempts describes how many times the driver
|
||||
// should retry the sending of the payload in case of a
|
||||
// retryable error.
|
||||
DefaultMaxAttempts int = 5
|
||||
// DefaultMetricsPath is a default URL path for endpoint that
|
||||
// receives metrics.
|
||||
DefaultMetricsPath string = "/v1/metrics"
|
||||
// DefaultBackoff is a default base backoff time used in the
|
||||
// exponential backoff strategy.
|
||||
DefaultBackoff time.Duration = 300 * time.Millisecond
|
||||
// DefaultTimeout is a default max waiting time for the backend to process
|
||||
// each span or metrics batch.
|
||||
DefaultTimeout time.Duration = 10 * time.Second
|
||||
)
|
||||
|
||||
type (
|
||||
SignalConfig struct {
|
||||
Endpoint string
|
||||
Insecure bool
|
||||
TLSCfg *tls.Config
|
||||
Headers map[string]string
|
||||
Compression Compression
|
||||
Timeout time.Duration
|
||||
URLPath string
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
TemporalitySelector metric.TemporalitySelector
|
||||
AggregationSelector metric.AggregationSelector
|
||||
}
|
||||
|
||||
Config struct {
|
||||
// Signal specific configurations
|
||||
Metrics SignalConfig
|
||||
|
||||
RetryConfig retry.Config
|
||||
|
||||
// gRPC configurations
|
||||
ReconnectionPeriod time.Duration
|
||||
ServiceConfig string
|
||||
DialOptions []grpc.DialOption
|
||||
GRPCConn *grpc.ClientConn
|
||||
}
|
||||
)
|
||||
|
||||
// NewHTTPConfig returns a new Config with all settings applied from opts and
|
||||
// any unset setting using the default HTTP config values.
|
||||
func NewHTTPConfig(opts ...HTTPOption) Config {
|
||||
cfg := Config{
|
||||
Metrics: SignalConfig{
|
||||
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
|
||||
URLPath: DefaultMetricsPath,
|
||||
Compression: NoCompression,
|
||||
Timeout: DefaultTimeout,
|
||||
|
||||
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||
AggregationSelector: metric.DefaultAggregationSelector,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
}
|
||||
cfg = ApplyHTTPEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyHTTPOption(cfg)
|
||||
}
|
||||
cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// cleanPath returns a path with all spaces trimmed and all redundancies
|
||||
// removed. If urlPath is empty or cleaning it results in an empty string,
|
||||
// defaultPath is returned instead.
|
||||
func cleanPath(urlPath string, defaultPath string) string {
|
||||
tmp := path.Clean(strings.TrimSpace(urlPath))
|
||||
if tmp == "." {
|
||||
return defaultPath
|
||||
}
|
||||
if !path.IsAbs(tmp) {
|
||||
tmp = fmt.Sprintf("/%s", tmp)
|
||||
}
|
||||
return tmp
|
||||
}
|
||||
|
||||
// NewGRPCConfig returns a new Config with all settings applied from opts and
|
||||
// any unset setting using the default gRPC config values.
|
||||
func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
|
||||
cfg := Config{
|
||||
Metrics: SignalConfig{
|
||||
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
|
||||
URLPath: DefaultMetricsPath,
|
||||
Compression: NoCompression,
|
||||
Timeout: DefaultTimeout,
|
||||
|
||||
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||
AggregationSelector: metric.DefaultAggregationSelector,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
|
||||
}
|
||||
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
if cfg.ServiceConfig != "" {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||
}
|
||||
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||
if cfg.Metrics.GRPCCredentials != nil {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||
} else if cfg.Metrics.Insecure {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
} else {
|
||||
// Default to using the host's root CA.
|
||||
creds := credentials.NewTLS(nil)
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
|
||||
}
|
||||
if cfg.Metrics.Compression == GzipCompression {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||
}
|
||||
if len(cfg.DialOptions) != 0 {
|
||||
cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
|
||||
}
|
||||
if cfg.ReconnectionPeriod != 0 {
|
||||
p := grpc.ConnectParams{
|
||||
Backoff: backoff.DefaultConfig,
|
||||
MinConnectTimeout: cfg.ReconnectionPeriod,
|
||||
}
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
type (
|
||||
// GenericOption applies an option to the HTTP or gRPC driver.
|
||||
GenericOption interface {
|
||||
ApplyHTTPOption(Config) Config
|
||||
ApplyGRPCOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
|
||||
// HTTPOption applies an option to the HTTP driver.
|
||||
HTTPOption interface {
|
||||
ApplyHTTPOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
|
||||
// GRPCOption applies an option to the gRPC driver.
|
||||
GRPCOption interface {
|
||||
ApplyGRPCOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
)
|
||||
|
||||
// genericOption is an option that applies the same logic
|
||||
// for both gRPC and HTTP.
|
||||
type genericOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return g.fn(cfg)
|
||||
}
|
||||
|
||||
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return g.fn(cfg)
|
||||
}
|
||||
|
||||
func (genericOption) private() {}
|
||||
|
||||
func newGenericOption(fn func(cfg Config) Config) GenericOption {
|
||||
return &genericOption{fn: fn}
|
||||
}
|
||||
|
||||
// splitOption is an option that applies different logics
|
||||
// for gRPC and HTTP.
|
||||
type splitOption struct {
|
||||
httpFn func(Config) Config
|
||||
grpcFn func(Config) Config
|
||||
}
|
||||
|
||||
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return g.grpcFn(cfg)
|
||||
}
|
||||
|
||||
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return g.httpFn(cfg)
|
||||
}
|
||||
|
||||
func (splitOption) private() {}
|
||||
|
||||
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
|
||||
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
|
||||
}
|
||||
|
||||
// httpOption is an option that is only applied to the HTTP driver.
|
||||
type httpOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return h.fn(cfg)
|
||||
}
|
||||
|
||||
func (httpOption) private() {}
|
||||
|
||||
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
|
||||
return &httpOption{fn: fn}
|
||||
}
|
||||
|
||||
// grpcOption is an option that is only applied to the gRPC driver.
|
||||
type grpcOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return h.fn(cfg)
|
||||
}
|
||||
|
||||
func (grpcOption) private() {}
|
||||
|
||||
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
|
||||
return &grpcOption{fn: fn}
|
||||
}
|
||||
|
||||
// Generic Options
|
||||
|
||||
func WithEndpoint(endpoint string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = endpoint
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithCompression(compression Compression) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Compression = compression
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithURLPath(urlPath string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.URLPath = urlPath
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithRetry(rc retry.Config) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.RetryConfig = rc
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
|
||||
return newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.TLSCfg = tlsCfg.Clone()
|
||||
return cfg
|
||||
}, func(cfg Config) Config {
|
||||
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithInsecure() GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Insecure = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithSecure() GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Insecure = false
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithHeaders(headers map[string]string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Headers = headers
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTimeout(duration time.Duration) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Timeout = duration
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.TemporalitySelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.AggregationSelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
58
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"

import "time"

const (
    // DefaultCollectorGRPCPort is the default gRPC port of the collector.
    DefaultCollectorGRPCPort uint16 = 4317
    // DefaultCollectorHTTPPort is the default HTTP port of the collector.
    DefaultCollectorHTTPPort uint16 = 4318
    // DefaultCollectorHost is the host address the Exporter will attempt
    // connect to if no collector address is provided.
    DefaultCollectorHost string = "localhost"
)

// Compression describes the compression used for payloads sent to the
// collector.
type Compression int

const (
    // NoCompression tells the driver to send payloads without
    // compression.
    NoCompression Compression = iota
    // GzipCompression tells the driver to send payloads after
    // compressing them with gzip.
    GzipCompression
)

// RetrySettings defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type RetrySettings struct {
    // Enabled indicates whether to retry sending batches in case of export failure.
    Enabled bool
    // InitialInterval is the time to wait after the first failure before retrying.
    InitialInterval time.Duration
    // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
    // consecutive retries will always be `MaxInterval`.
    MaxInterval time.Duration
    // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
    // Once this value is reached, the data is discarded.
    MaxElapsedTime time.Duration
}
49
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go
generated
vendored
Normal file
@@ -0,0 +1,49 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf"

import (
    "crypto/tls"
    "crypto/x509"
    "errors"
    "os"
)

// ReadTLSConfigFromFile reads a PEM certificate file and creates
// a tls.Config that will use this certificate to verify a server certificate.
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
    b, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }

    return CreateTLSConfig(b)
}

// CreateTLSConfig creates a tls.Config from raw certificate bytes
// to verify a server certificate.
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
    cp := x509.NewCertPool()
    if ok := cp.AppendCertsFromPEM(certBytes); !ok {
        return nil, errors.New("failed to append certificate to the cert pool")
    }

    return &tls.Config{
        RootCAs: cp,
    }, nil
}
67
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/partialsuccess.go

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal"

import "fmt"

// PartialSuccess represents the underlying error for all handling
// OTLP partial success messages. Use `errors.Is(err,
// PartialSuccess{})` to test whether an error passed to the OTel
// error handler belongs to this category.
type PartialSuccess struct {
    ErrorMessage  string
    RejectedItems int64
    RejectedKind  string
}

var _ error = PartialSuccess{}

// Error implements the error interface.
func (ps PartialSuccess) Error() string {
    msg := ps.ErrorMessage
    if msg == "" {
        msg = "empty message"
    }
    return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
}

// Is supports the errors.Is() interface.
func (ps PartialSuccess) Is(err error) bool {
    _, ok := err.(PartialSuccess)
    return ok
}

// TracePartialSuccessError returns an error describing a partial success
// response for the trace signal.
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
    return PartialSuccess{
        ErrorMessage:  errorMessage,
        RejectedItems: itemsRejected,
        RejectedKind:  "spans",
    }
}

// MetricPartialSuccessError returns an error describing a partial success
// response for the metric signal.
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
    return PartialSuccess{
        ErrorMessage:  errorMessage,
        RejectedItems: itemsRejected,
        RejectedKind:  "metric data points",
    }
}
156
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/retry/retry.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package retry provides request retry functionality that can perform
// configurable exponential backoff for transient errors and honor any
// explicit throttle responses received.
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"

import (
    "context"
    "fmt"
    "time"

    "github.com/cenkalti/backoff/v4"
)

// DefaultConfig are the recommended defaults to use.
var DefaultConfig = Config{
    Enabled:         true,
    InitialInterval: 5 * time.Second,
    MaxInterval:     30 * time.Second,
    MaxElapsedTime:  time.Minute,
}

// Config defines configuration for retrying batches in case of export failure
// using an exponential backoff.
type Config struct {
    // Enabled indicates whether to retry sending batches in case of
    // export failure.
    Enabled bool
    // InitialInterval is the time to wait after the first failure before
    // retrying.
    InitialInterval time.Duration
    // MaxInterval is the upper bound on backoff interval. Once this value is
    // reached the delay between consecutive retries will always be
    // `MaxInterval`.
    MaxInterval time.Duration
    // MaxElapsedTime is the maximum amount of time (including retries) spent
    // trying to send a request/batch. Once this value is reached, the data
    // is discarded.
    MaxElapsedTime time.Duration
}

// RequestFunc wraps a request with retry logic.
type RequestFunc func(context.Context, func(context.Context) error) error

// EvaluateFunc returns if an error is retry-able and if an explicit throttle
// duration should be honored that was included in the error.
//
// The function must return true if the error argument is retry-able,
// otherwise it must return false for the first return parameter.
//
// The function must return a non-zero time.Duration if the error contains
// explicit throttle duration that should be honored, otherwise it must return
// a zero valued time.Duration.
type EvaluateFunc func(error) (bool, time.Duration)

// RequestFunc returns a RequestFunc using the evaluate function to determine
// if requests can be retried and based on the exponential backoff
// configuration of c.
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
    if !c.Enabled {
        return func(ctx context.Context, fn func(context.Context) error) error {
            return fn(ctx)
        }
    }

    return func(ctx context.Context, fn func(context.Context) error) error {
        // Do not use NewExponentialBackOff since it calls Reset and the code here
        // must call Reset after changing the InitialInterval (this saves an
        // unnecessary call to Now).
        b := &backoff.ExponentialBackOff{
            InitialInterval:     c.InitialInterval,
            RandomizationFactor: backoff.DefaultRandomizationFactor,
            Multiplier:          backoff.DefaultMultiplier,
            MaxInterval:         c.MaxInterval,
            MaxElapsedTime:      c.MaxElapsedTime,
            Stop:                backoff.Stop,
            Clock:               backoff.SystemClock,
        }
        b.Reset()

        for {
            err := fn(ctx)
            if err == nil {
                return nil
            }

            retryable, throttle := evaluate(err)
            if !retryable {
                return err
            }

            bOff := b.NextBackOff()
            if bOff == backoff.Stop {
                return fmt.Errorf("max retry time elapsed: %w", err)
            }

            // Wait for the greater of the backoff or throttle delay.
            var delay time.Duration
            if bOff > throttle {
                delay = bOff
            } else {
                elapsed := b.GetElapsedTime()
                if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
                    return fmt.Errorf("max retry time would elapse: %w", err)
                }
                delay = throttle
            }

            if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
                return fmt.Errorf("%w: %s", ctxErr, err)
            }
        }
    }
}

// Allow override for testing.
var waitFunc = wait

// wait takes the caller's context, and the amount of time to wait. It will
// return nil if the timer fires before or at the same time as the context's
// deadline. This indicates that the call can be retried.
func wait(ctx context.Context, delay time.Duration) error {
    timer := time.NewTimer(delay)
    defer timer.Stop()

    select {
    case <-ctx.Done():
        // Handle the case where the timer and context deadline end
        // simultaneously by prioritizing the timer expiration nil value
        // response.
        select {
        case <-timer.C:
        default:
            return ctx.Err()
        }
    case <-timer.C:
    }

    return nil
}
155
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go
generated
vendored
Normal file
@@ -0,0 +1,155 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"

import (
    "go.opentelemetry.io/otel/attribute"
    cpb "go.opentelemetry.io/proto/otlp/common/v1"
)

// AttrIter transforms an attribute iterator into OTLP key-values.
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
    l := iter.Len()
    if l == 0 {
        return nil
    }

    out := make([]*cpb.KeyValue, 0, l)
    for iter.Next() {
        out = append(out, KeyValue(iter.Attribute()))
    }
    return out
}

// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
    if len(attrs) == 0 {
        return nil
    }

    out := make([]*cpb.KeyValue, 0, len(attrs))
    for _, kv := range attrs {
        out = append(out, KeyValue(kv))
    }
    return out
}

// KeyValue transforms an attribute KeyValue into an OTLP key-value.
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
    return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
}

// Value transforms an attribute Value into an OTLP AnyValue.
func Value(v attribute.Value) *cpb.AnyValue {
    av := new(cpb.AnyValue)
    switch v.Type() {
    case attribute.BOOL:
        av.Value = &cpb.AnyValue_BoolValue{
            BoolValue: v.AsBool(),
        }
    case attribute.BOOLSLICE:
        av.Value = &cpb.AnyValue_ArrayValue{
            ArrayValue: &cpb.ArrayValue{
                Values: boolSliceValues(v.AsBoolSlice()),
            },
        }
    case attribute.INT64:
        av.Value = &cpb.AnyValue_IntValue{
            IntValue: v.AsInt64(),
        }
    case attribute.INT64SLICE:
        av.Value = &cpb.AnyValue_ArrayValue{
            ArrayValue: &cpb.ArrayValue{
                Values: int64SliceValues(v.AsInt64Slice()),
            },
        }
    case attribute.FLOAT64:
        av.Value = &cpb.AnyValue_DoubleValue{
            DoubleValue: v.AsFloat64(),
        }
    case attribute.FLOAT64SLICE:
        av.Value = &cpb.AnyValue_ArrayValue{
            ArrayValue: &cpb.ArrayValue{
                Values: float64SliceValues(v.AsFloat64Slice()),
            },
        }
    case attribute.STRING:
        av.Value = &cpb.AnyValue_StringValue{
            StringValue: v.AsString(),
        }
    case attribute.STRINGSLICE:
        av.Value = &cpb.AnyValue_ArrayValue{
            ArrayValue: &cpb.ArrayValue{
                Values: stringSliceValues(v.AsStringSlice()),
            },
        }
    default:
        av.Value = &cpb.AnyValue_StringValue{
            StringValue: "INVALID",
        }
    }
    return av
}

func boolSliceValues(vals []bool) []*cpb.AnyValue {
    converted := make([]*cpb.AnyValue, len(vals))
    for i, v := range vals {
        converted[i] = &cpb.AnyValue{
            Value: &cpb.AnyValue_BoolValue{
                BoolValue: v,
            },
        }
    }
    return converted
}

func int64SliceValues(vals []int64) []*cpb.AnyValue {
    converted := make([]*cpb.AnyValue, len(vals))
    for i, v := range vals {
        converted[i] = &cpb.AnyValue{
            Value: &cpb.AnyValue_IntValue{
                IntValue: v,
            },
        }
    }
    return converted
}

func float64SliceValues(vals []float64) []*cpb.AnyValue {
    converted := make([]*cpb.AnyValue, len(vals))
    for i, v := range vals {
        converted[i] = &cpb.AnyValue{
            Value: &cpb.AnyValue_DoubleValue{
                DoubleValue: v,
            },
        }
    }
    return converted
}

func stringSliceValues(vals []string) []*cpb.AnyValue {
    converted := make([]*cpb.AnyValue, len(vals))
    for i, v := range vals {
        converted[i] = &cpb.AnyValue{
            Value: &cpb.AnyValue_StringValue{
                StringValue: v,
            },
        }
    }
    return converted
}
114
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go
generated
vendored
Normal file
@@ -0,0 +1,114 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"

import (
    "errors"
    "fmt"
    "strings"

    mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

var (
    errUnknownAggregation = errors.New("unknown aggregation")
    errUnknownTemporality = errors.New("unknown temporality")
)

type errMetric struct {
    m   *mpb.Metric
    err error
}

func (e errMetric) Unwrap() error {
    return e.err
}

func (e errMetric) Error() string {
    format := "invalid metric (name: %q, description: %q, unit: %q): %s"
    return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
}

func (e errMetric) Is(target error) bool {
    return errors.Is(e.err, target)
}

// multiErr is used by the data-type transform functions to wrap multiple
// errors into a single return value. The error message will show all errors
// as a list and scope them by the datatype name that is returning them.
type multiErr struct {
    datatype string
    errs     []error
}

// errOrNil returns nil if e contains no errors, otherwise it returns e.
func (e *multiErr) errOrNil() error {
    if len(e.errs) == 0 {
        return nil
    }
    return e
}

// append adds err to e. If err is a multiErr, its errs are flattened into e.
func (e *multiErr) append(err error) {
    // Do not use errors.As here, this should only be flattened one layer. If
    // there is a *multiErr several steps down the chain, all the errors above
    // it will be discarded if errors.As is used instead.
    switch other := err.(type) {
    case *multiErr:
        // Flatten err errors into e.
        e.errs = append(e.errs, other.errs...)
    default:
        e.errs = append(e.errs, err)
    }
}

func (e *multiErr) Error() string {
    es := make([]string, len(e.errs))
    for i, err := range e.errs {
        es[i] = fmt.Sprintf("* %s", err)
    }

    format := "%d errors occurred transforming %s:\n\t%s"
    return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
}

func (e *multiErr) Unwrap() error {
    switch len(e.errs) {
    case 0:
        return nil
    case 1:
        return e.errs[0]
    }

    // Return a multiErr without the leading error.
    cp := &multiErr{
        datatype: e.datatype,
        errs:     make([]error, len(e.errs)-1),
    }
    copy(cp.errs, e.errs[1:])
    return cp
}

func (e *multiErr) Is(target error) bool {
    if len(e.errs) == 0 {
        return false
    }
    // Check if the first error is target.
    return errors.Is(e.errs[0], target)
}
292
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go
generated
vendored
Normal file
@@ -0,0 +1,292 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package transform provides transformation functionality from the
// sdk/metric/metricdata data-types into OTLP data-types.
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform"

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	cpb "go.opentelemetry.io/proto/otlp/common/v1"
	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
)

// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
// ResourceMetrics that contains partial OTLP ScopeMetrics.
func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
	sms, err := ScopeMetrics(rm.ScopeMetrics)
	return &mpb.ResourceMetrics{
		Resource: &rpb.Resource{
			Attributes: AttrIter(rm.Resource.Iter()),
		},
		ScopeMetrics: sms,
		SchemaUrl:    rm.Resource.SchemaURL(),
	}, err
}

// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
// sms contains invalid metric values, an error will be returned along with a
// slice that contains partial OTLP ScopeMetrics.
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
	errs := &multiErr{datatype: "ScopeMetrics"}
	out := make([]*mpb.ScopeMetrics, 0, len(sms))
	for _, sm := range sms {
		ms, err := Metrics(sm.Metrics)
		if err != nil {
			errs.append(err)
		}

		out = append(out, &mpb.ScopeMetrics{
			Scope: &cpb.InstrumentationScope{
				Name:    sm.Scope.Name,
				Version: sm.Scope.Version,
			},
			Metrics:   ms,
			SchemaUrl: sm.Scope.SchemaURL,
		})
	}
	return out, errs.errOrNil()
}

// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
// invalid metric values, an error will be returned along with a slice that
// contains partial OTLP Metrics.
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
	errs := &multiErr{datatype: "Metrics"}
	out := make([]*mpb.Metric, 0, len(ms))
	for _, m := range ms {
		o, err := metric(m)
		if err != nil {
			// Do not include invalid data. Drop the metric, report the error.
			errs.append(errMetric{m: o, err: err})
			continue
		}
		out = append(out, o)
	}
	return out, errs.errOrNil()
}

func metric(m metricdata.Metrics) (*mpb.Metric, error) {
	var err error
	out := &mpb.Metric{
		Name:        m.Name,
		Description: m.Description,
		Unit:        string(m.Unit),
	}
	switch a := m.Data.(type) {
	case metricdata.Gauge[int64]:
		out.Data = Gauge[int64](a)
	case metricdata.Gauge[float64]:
		out.Data = Gauge[float64](a)
	case metricdata.Sum[int64]:
		out.Data, err = Sum[int64](a)
	case metricdata.Sum[float64]:
		out.Data, err = Sum[float64](a)
	case metricdata.Histogram[int64]:
		out.Data, err = Histogram(a)
	case metricdata.Histogram[float64]:
		out.Data, err = Histogram(a)
	case metricdata.ExponentialHistogram[int64]:
		out.Data, err = ExponentialHistogram(a)
	case metricdata.ExponentialHistogram[float64]:
		out.Data, err = ExponentialHistogram(a)
	default:
		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
	}
	return out, err
}

// Gauge returns an OTLP Metric_Gauge generated from g.
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
	return &mpb.Metric_Gauge{
		Gauge: &mpb.Gauge{
			DataPoints: DataPoints(g.DataPoints),
		},
	}
}

// Sum returns an OTLP Metric_Sum generated from s. An error is returned
// if the temporality of s is unknown.
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
	t, err := Temporality(s.Temporality)
	if err != nil {
		return nil, err
	}
	return &mpb.Metric_Sum{
		Sum: &mpb.Sum{
			AggregationTemporality: t,
			IsMonotonic:            s.IsMonotonic,
			DataPoints:             DataPoints(s.DataPoints),
		},
	}, nil
}

// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
	out := make([]*mpb.NumberDataPoint, 0, len(dPts))
	for _, dPt := range dPts {
		ndp := &mpb.NumberDataPoint{
			Attributes:        AttrIter(dPt.Attributes.Iter()),
			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
			TimeUnixNano:      timeUnixNano(dPt.Time),
		}
		switch v := any(dPt.Value).(type) {
		case int64:
			ndp.Value = &mpb.NumberDataPoint_AsInt{
				AsInt: v,
			}
		case float64:
			ndp.Value = &mpb.NumberDataPoint_AsDouble{
				AsDouble: v,
			}
		}
		out = append(out, ndp)
	}
	return out
}

// Histogram returns an OTLP Metric_Histogram generated from h. An error is
// returned if the temporality of h is unknown.
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
	t, err := Temporality(h.Temporality)
	if err != nil {
		return nil, err
	}
	return &mpb.Metric_Histogram{
		Histogram: &mpb.Histogram{
			AggregationTemporality: t,
			DataPoints:             HistogramDataPoints(h.DataPoints),
		},
	}, nil
}

// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
// from dPts.
func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
	out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
	for _, dPt := range dPts {
		sum := float64(dPt.Sum)
		hdp := &mpb.HistogramDataPoint{
			Attributes:        AttrIter(dPt.Attributes.Iter()),
			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
			TimeUnixNano:      timeUnixNano(dPt.Time),
			Count:             dPt.Count,
			Sum:               &sum,
			BucketCounts:      dPt.BucketCounts,
			ExplicitBounds:    dPt.Bounds,
		}
		if v, ok := dPt.Min.Value(); ok {
			vF64 := float64(v)
			hdp.Min = &vF64
		}
		if v, ok := dPt.Max.Value(); ok {
			vF64 := float64(v)
			hdp.Max = &vF64
		}
		out = append(out, hdp)
	}
	return out
}

// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
// returned if the temporality of h is unknown.
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
	t, err := Temporality(h.Temporality)
	if err != nil {
		return nil, err
	}
	return &mpb.Metric_ExponentialHistogram{
		ExponentialHistogram: &mpb.ExponentialHistogram{
			AggregationTemporality: t,
			DataPoints:             ExponentialHistogramDataPoints(h.DataPoints),
		},
	}, nil
}

// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
// from dPts.
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
	out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
	for _, dPt := range dPts {
		sum := float64(dPt.Sum)
		ehdp := &mpb.ExponentialHistogramDataPoint{
			Attributes:        AttrIter(dPt.Attributes.Iter()),
			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
			TimeUnixNano:      timeUnixNano(dPt.Time),
			Count:             dPt.Count,
			Sum:               &sum,
			Scale:             dPt.Scale,
			ZeroCount:         dPt.ZeroCount,

			Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
			Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
		}
		if v, ok := dPt.Min.Value(); ok {
			vF64 := float64(v)
			ehdp.Min = &vF64
		}
		if v, ok := dPt.Max.Value(); ok {
			vF64 := float64(v)
			ehdp.Max = &vF64
		}
		out = append(out, ehdp)
	}
	return out
}

// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
// from bucket.
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
	return &mpb.ExponentialHistogramDataPoint_Buckets{
		Offset:       bucket.Offset,
		BucketCounts: bucket.Counts,
	}
}

// Temporality returns an OTLP AggregationTemporality generated from t. If t
// is unknown, an error is returned along with the invalid
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
	switch t {
	case metricdata.DeltaTemporality:
		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
	case metricdata.CumulativeTemporality:
		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
	default:
		err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
	}
}

// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC as uint64.
// The result is undefined if the Unix time
// in nanoseconds cannot be represented by an int64
// (a date before the year 1678 or after 2262).
// timeUnixNano on the zero Time returns 0.
// The result does not depend on the location associated with t.
func timeUnixNano(t time.Time) uint64 {
	if t.IsZero() {
		return 0
	}
	return uint64(t.UnixNano())
}
201
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   [Standard Apache License, Version 2.0 text; identical to the otlpmetric LICENSE text reproduced earlier in this diff.]
297
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"

import (
	"bytes"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"google.golang.org/protobuf/proto"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
	colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

type client struct {
	// req is cloned for every upload the client makes.
	req         *http.Request
	compression Compression
	requestFunc retry.RequestFunc
	httpClient  *http.Client
}

// Keep it in sync with golang's DefaultTransport from net/http! We
// have our own copy to avoid handling a situation where the
// DefaultTransport is overwritten with some different implementation
// of http.RoundTripper or it's modified by another package.
var ourTransport = &http.Transport{
	Proxy: http.ProxyFromEnvironment,
	DialContext: (&net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
	}).DialContext,
	ForceAttemptHTTP2:     true,
	MaxIdleConns:          100,
	IdleConnTimeout:       90 * time.Second,
	TLSHandshakeTimeout:   10 * time.Second,
	ExpectContinueTimeout: 1 * time.Second,
}

// newClient creates a new HTTP metric client.
func newClient(cfg oconf.Config) (*client, error) {
	httpClient := &http.Client{
		Transport: ourTransport,
		Timeout:   cfg.Metrics.Timeout,
	}
	if cfg.Metrics.TLSCfg != nil {
		transport := ourTransport.Clone()
		transport.TLSClientConfig = cfg.Metrics.TLSCfg
		httpClient.Transport = transport
	}

	u := &url.URL{
		Scheme: "https",
		Host:   cfg.Metrics.Endpoint,
		Path:   cfg.Metrics.URLPath,
	}
	if cfg.Metrics.Insecure {
		u.Scheme = "http"
	}
	// Body is set when this is cloned during upload.
	req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody)
	if err != nil {
		return nil, err
	}

	userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
	req.Header.Set("User-Agent", userAgent)

	if n := len(cfg.Metrics.Headers); n > 0 {
		for k, v := range cfg.Metrics.Headers {
			req.Header.Set(k, v)
		}
	}
	req.Header.Set("Content-Type", "application/x-protobuf")

	return &client{
		compression: Compression(cfg.Metrics.Compression),
		req:         req,
		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
		httpClient:  httpClient,
	}, nil
}

// Shutdown shuts down the client, freeing all resources.
func (c *client) Shutdown(ctx context.Context) error {
	// The otlpmetric.Exporter synchronizes access to client methods and
	// ensures this is called only once. The only thing that needs to be done
	// here is to release any computational resources the client holds.

	c.requestFunc = nil
	c.httpClient = nil
	return ctx.Err()
}

// UploadMetrics sends protoMetrics to the connected endpoint.
//
// Retryable errors from the server will be handled according to any
// RetryConfig the client was created with.
func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
	// The otlpmetric.Exporter synchronizes access to client methods, and
	// ensures this is not called after the Exporter is shutdown. Only thing
	// to do here is send data.

	pbRequest := &colmetricpb.ExportMetricsServiceRequest{
		ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics},
	}
	body, err := proto.Marshal(pbRequest)
	if err != nil {
		return err
	}
	request, err := c.newRequest(ctx, body)
	if err != nil {
		return err
	}

	return c.requestFunc(ctx, func(iCtx context.Context) error {
		select {
		case <-iCtx.Done():
			return iCtx.Err()
		default:
		}

		request.reset(iCtx)
		resp, err := c.httpClient.Do(request.Request)
		if err != nil {
			return err
		}

		var rErr error
		switch sc := resp.StatusCode; {
		case sc >= 200 && sc <= 299:
			// Success, do not retry.

			// Read the partial success message, if any.
			var respData bytes.Buffer
			if _, err := io.Copy(&respData, resp.Body); err != nil {
				return err
			}

			if respData.Len() != 0 {
				var respProto colmetricpb.ExportMetricsServiceResponse
				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
					return err
				}

				if respProto.PartialSuccess != nil {
					msg := respProto.PartialSuccess.GetErrorMessage()
					n := respProto.PartialSuccess.GetRejectedDataPoints()
					if n != 0 || msg != "" {
						err := internal.MetricPartialSuccessError(n, msg)
						otel.Handle(err)
					}
				}
			}
			return nil
		case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
			// Retry-able failure.
			rErr = newResponseError(resp.Header)

			// Going to retry, drain the body to reuse the connection.
			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
				_ = resp.Body.Close()
				return err
			}
		default:
			rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status)
		}

		if err := resp.Body.Close(); err != nil {
			return err
		}
		return rErr
	})
}

var gzPool = sync.Pool{
	New: func() interface{} {
		w := gzip.NewWriter(io.Discard)
		return w
	},
}

func (c *client) newRequest(ctx context.Context, body []byte) (request, error) {
	r := c.req.Clone(ctx)
	req := request{Request: r}

	switch c.compression {
	case NoCompression:
		r.ContentLength = (int64)(len(body))
		req.bodyReader = bodyReader(body)
	case GzipCompression:
		// Ensure the content length is not used.
		r.ContentLength = -1
		r.Header.Set("Content-Encoding", "gzip")

		gz := gzPool.Get().(*gzip.Writer)
		defer gzPool.Put(gz)

		var b bytes.Buffer
		gz.Reset(&b)

		if _, err := gz.Write(body); err != nil {
			return req, err
		}
		// Close needs to be called to ensure the body is fully written.
		if err := gz.Close(); err != nil {
			return req, err
		}

		req.bodyReader = bodyReader(b.Bytes())
	}

	return req, nil
}

// bodyReader returns a closure returning a new reader for buf.
func bodyReader(buf []byte) func() io.ReadCloser {
	return func() io.ReadCloser {
		return io.NopCloser(bytes.NewReader(buf))
	}
}

// request wraps an http.Request with a resettable body reader.
type request struct {
	*http.Request

	// bodyReader allows the same body to be used for multiple requests.
	bodyReader func() io.ReadCloser
}

// reset reinitializes the request Body and uses ctx for the request.
func (r *request) reset(ctx context.Context) {
	r.Body = r.bodyReader()
	r.Request = r.Request.WithContext(ctx)
}

// retryableError represents a request failure that can be retried.
type retryableError struct {
	throttle int64
}

// newResponseError returns a retryableError and will extract any explicit
// throttle delay contained in headers.
func newResponseError(header http.Header) error {
	var rErr retryableError
	if v := header.Get("Retry-After"); v != "" {
		if t, err := strconv.ParseInt(v, 10, 64); err == nil {
			rErr.throttle = t
		}
	}
	return rErr
}

func (e retryableError) Error() string {
	return "retry-able request failure"
}

// evaluate returns if err is retry-able. If it is and it includes an explicit
// throttling delay, that delay is also returned.
func evaluate(err error) (bool, time.Duration) {
	if err == nil {
		return false, 0
	}

	rErr, ok := err.(retryableError)
	if !ok {
		return false, 0
	}

	return true, time.Duration(rErr.throttle)
}
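The client above retries uploads by rebuilding the request body from a closure on every attempt, because an http.Request body can only be read once. Below is a minimal standalone sketch of that resettable-body pattern; it is not part of the vendored code, and names such as newResettableRequest and rewind are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// newResettableRequest builds a POST request whose body can be re-created
// before every retry attempt, mirroring the bodyReader/reset pattern above.
func newResettableRequest(url string, payload []byte) (*http.Request, func(), error) {
	req, err := http.NewRequest(http.MethodPost, url, http.NoBody)
	if err != nil {
		return nil, nil, err
	}
	rewind := func() {
		// A fresh reader per attempt; the previous one may already be drained.
		req.Body = io.NopCloser(bytes.NewReader(payload))
	}
	rewind()
	return req, rewind, nil
}

func main() {
	req, rewind, err := newResettableRequest("http://localhost:4318/v1/metrics", []byte("payload"))
	if err != nil {
		panic(err)
	}
	for attempt := 0; attempt < 3; attempt++ {
		rewind() // reset the body before each attempt
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			continue // transport error: retry
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusTooManyRequests && resp.StatusCode != http.StatusServiceUnavailable {
			fmt.Println("done:", resp.Status)
			return
		}
	}
}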
199
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"

import (
	"crypto/tls"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
	"go.opentelemetry.io/otel/sdk/metric"
)

// Compression describes the compression used for payloads sent to the
// collector.
type Compression oconf.Compression

const (
	// NoCompression tells the driver to send payloads without
	// compression.
	NoCompression = Compression(oconf.NoCompression)
	// GzipCompression tells the driver to send payloads after
	// compressing them with gzip.
	GzipCompression = Compression(oconf.GzipCompression)
)

// Option applies an option to the Exporter.
type Option interface {
	applyHTTPOption(oconf.Config) oconf.Config
}

func asHTTPOptions(opts []Option) []oconf.HTTPOption {
	converted := make([]oconf.HTTPOption, len(opts))
	for i, o := range opts {
		converted[i] = oconf.NewHTTPOption(o.applyHTTPOption)
	}
	return converted
}

// RetryConfig defines configuration for retrying the export of metric data
// that failed.
type RetryConfig retry.Config

type wrappedOption struct {
	oconf.HTTPOption
}

func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config {
	return w.ApplyHTTPOption(cfg)
}

// WithEndpoint sets the target endpoint the Exporter will connect to. This
// endpoint is specified as a host and optional port, no path or scheme should
// be included (see WithInsecure and WithURLPath).
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, "localhost:4318" will be used.
func WithEndpoint(endpoint string) Option {
	return wrappedOption{oconf.WithEndpoint(endpoint)}
}

// WithCompression sets the compression strategy the Exporter will use to
// compress the HTTP body.
//
// If the OTEL_EXPORTER_OTLP_COMPRESSION or
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
// this option is not passed, that variable value will be used. That value can
// be either "none" or "gzip". If both are set,
// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no compression strategy will be used.
func WithCompression(compression Compression) Option {
	return wrappedOption{oconf.WithCompression(oconf.Compression(compression))}
}

// WithURLPath sets the URL path the Exporter will send requests to.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, the path
// contained in that variable value will be used. If both are set,
// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, "/v1/metrics" will be used.
func WithURLPath(urlPath string) Option {
	return wrappedOption{oconf.WithURLPath(urlPath)}
}

// WithTLSClientConfig sets the TLS configuration the Exporter will use for
// HTTP requests.
//
// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
// this option is not passed, that variable value will be used. The value will
// be parsed as the filepath of the TLS certificate chain to use. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, the system default configuration is used.
func WithTLSClientConfig(tlsCfg *tls.Config) Option {
	return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)}
}

// WithInsecure disables client transport security for the Exporter's HTTP
// connection.
//
// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
// environment variable is set, and this option is not passed, that variable
// value will be used to determine client security. If the endpoint has a
// scheme of "http" or "unix" client security will be disabled. If both are
// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, client security will be used.
func WithInsecure() Option {
	return wrappedOption{oconf.WithInsecure()}
}

// WithHeaders will send the provided headers with each HTTP request.
//
// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as a list of key value pairs.
// These pairs are expected to be in the W3C Correlation-Context format
// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, no user headers will be set.
func WithHeaders(headers map[string]string) Option {
	return wrappedOption{oconf.WithHeaders(headers)}
}

// WithTimeout sets the max amount of time an Exporter will attempt an export.
//
// This takes precedence over any retry settings defined by WithRetry. Once
// this time limit has been reached the export is abandoned and the metric
// data is dropped.
//
// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
// environment variable is set, and this option is not passed, that variable
// value will be used. The value will be parsed as an integer representing the
// timeout in milliseconds. If both are set,
// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
//
// By default, if an environment variable is not set, and this option is not
// passed, a timeout of 10 seconds will be used.
func WithTimeout(duration time.Duration) Option {
	return wrappedOption{oconf.WithTimeout(duration)}
}

// WithRetry sets the retry policy for transient retryable errors that are
// returned by the target endpoint.
//
// If the target endpoint responds with not only a retryable error, but
// explicitly returns a backoff time in the response, that time will take
// precedence over these settings.
//
// If unset, the default retry policy will be used. It will retry the export
// 5 seconds after receiving a retryable error and increase exponentially
// after each error for no more than a total time of 1 minute.
func WithRetry(rc RetryConfig) Option {
	return wrappedOption{oconf.WithRetry(retry.Config(rc))}
}

// WithTemporalitySelector sets the TemporalitySelector the client will use to
// determine the Temporality of an instrument based on its kind. If this option
// is not used, the client will use the DefaultTemporalitySelector from the
// go.opentelemetry.io/otel/sdk/metric package.
func WithTemporalitySelector(selector metric.TemporalitySelector) Option {
	return wrappedOption{oconf.WithTemporalitySelector(selector)}
}

// WithAggregationSelector sets the AggregationSelector the client will use to
// determine the aggregation to use for an instrument based on its kind. If
// this option is not used, the reader will use the DefaultAggregationSelector
// from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation
// explicitly passed for a view matching an instrument.
func WithAggregationSelector(selector metric.AggregationSelector) Option {
	return wrappedOption{oconf.WithAggregationSelector(selector)}
}
18
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package otlpmetrichttp provides an otlpmetric.Exporter that communicates
// with an OTLP receiving endpoint using protobuf encoded metric data over
// HTTP.
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
162
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"

import (
	"context"
	"fmt"
	"sync"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
	"go.opentelemetry.io/otel/internal/global"
	"go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
)

// Exporter is an OpenTelemetry metric Exporter using protobufs over HTTP.
type Exporter struct {
	// Ensure synchronous access to the client across all functionality.
	clientMu sync.Mutex
	client   interface {
		UploadMetrics(context.Context, *metricpb.ResourceMetrics) error
		Shutdown(context.Context) error
	}

	temporalitySelector metric.TemporalitySelector
	aggregationSelector metric.AggregationSelector

	shutdownOnce sync.Once
}

func newExporter(c *client, cfg oconf.Config) (*Exporter, error) {
	ts := cfg.Metrics.TemporalitySelector
	if ts == nil {
		ts = func(metric.InstrumentKind) metricdata.Temporality {
			return metricdata.CumulativeTemporality
		}
	}

	as := cfg.Metrics.AggregationSelector
	if as == nil {
		as = metric.DefaultAggregationSelector
	}

	return &Exporter{
		client: c,

		temporalitySelector: ts,
		aggregationSelector: as,
	}, nil
}

// Temporality returns the Temporality to use for an instrument kind.
func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
	return e.temporalitySelector(k)
}

// Aggregation returns the Aggregation to use for an instrument kind.
func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
	return e.aggregationSelector(k)
}

// Export transforms and transmits metric data to an OTLP receiver.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
	defer global.Debug("OTLP/HTTP exporter export", "Data", rm)

	otlpRm, err := transform.ResourceMetrics(rm)
	// Best effort upload of transformable metrics.
	e.clientMu.Lock()
	upErr := e.client.UploadMetrics(ctx, otlpRm)
	e.clientMu.Unlock()
	if upErr != nil {
		if err == nil {
			return fmt.Errorf("failed to upload metrics: %w", upErr)
		}
		// Merge the two errors.
		return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
	}
	return err
}

// ForceFlush flushes any metric data held by an exporter.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) ForceFlush(ctx context.Context) error {
	// The exporter and client hold no state, nothing to flush.
	return ctx.Err()
}

// Shutdown flushes all metric data held by an exporter and releases any held
// computational resources.
//
// This method returns an error if called after Shutdown.
// This method returns an error if the method is canceled by the passed context.
//
// This method is safe to call concurrently.
func (e *Exporter) Shutdown(ctx context.Context) error {
	err := errShutdown
	e.shutdownOnce.Do(func() {
		e.clientMu.Lock()
		client := e.client
		e.client = shutdownClient{}
		e.clientMu.Unlock()
		err = client.Shutdown(ctx)
	})
	return err
}

var errShutdown = fmt.Errorf("HTTP exporter is shutdown")

type shutdownClient struct{}

func (c shutdownClient) err(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return errShutdown
}

func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error {
	return c.err(ctx)
}

func (c shutdownClient) Shutdown(ctx context.Context) error {
	return c.err(ctx)
}

// MarshalLog returns logging data about the Exporter.
func (e *Exporter) MarshalLog() interface{} {
	return struct{ Type string }{Type: "OTLP/HTTP"}
}

// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
// endpoint using protobufs over HTTP.
func New(_ context.Context, opts ...Option) (*Exporter, error) {
	cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
	c, err := newClient(cfg)
	if err != nil {
		return nil, err
	}
	return newExporter(c, cfg)
}
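For context, a minimal sketch of how this exporter is typically wired into the metrics SDK with a PeriodicReader; the endpoint, interval, and instrument names are illustrative values, not taken from this diff.

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Build the OTLP/HTTP exporter using the options defined in config.go.
	exp, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpoint("localhost:4318"),
		otlpmetrichttp.WithInsecure(),
		otlpmetrichttp.WithTimeout(10*time.Second),
	)
	if err != nil {
		panic(err)
	}

	// A PeriodicReader drives Export on an interval.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(15*time.Second))),
	)
	defer func() { _ = provider.Shutdown(ctx) }()

	meter := provider.Meter("example")
	counter, _ := meter.Int64Counter("requests")
	counter.Add(ctx, 1)
}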
202
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl

// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"

	"go.opentelemetry.io/otel/internal/global"
)

// ConfigFn is the generic function used to set a config.
type ConfigFn func(*EnvOptionsReader)

// EnvOptionsReader reads the required environment variables.
type EnvOptionsReader struct {
	GetEnv    func(string) string
	ReadFile  func(string) ([]byte, error)
	Namespace string
}

// Apply runs every ConfigFn.
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
	for _, o := range opts {
		o(e)
	}
}

// GetEnvValue gets an OTLP environment variable value of the specified key
// using the GetEnv function.
// This function prepends the OTLP specified namespace to all key lookups.
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
	return v, v != ""
}

// WithString retrieves the specified config and passes it to ConfigFn as a string.
func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(v)
		}
	}
}

// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
func WithBool(n string, fn func(bool)) ConfigFn {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			b := strings.ToLower(v) == "true"
			fn(b)
		}
	}
}

// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			d, err := strconv.Atoi(v)
			if err != nil {
				global.Error(err, "parse duration", "input", v)
				return
			}
			fn(time.Duration(d) * time.Millisecond)
		}
	}
}

// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			fn(stringToHeader(v))
		}
	}
}

// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			u, err := url.Parse(v)
			if err != nil {
				global.Error(err, "parse url", "input", v)
				return
			}
			fn(u)
		}
	}
}

// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and passed to fn.
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
	return func(e *EnvOptionsReader) {
		if v, ok := e.GetEnvValue(n); ok {
			b, err := e.ReadFile(v)
			if err != nil {
				global.Error(err, "read tls ca cert file", "file", v)
				return
			}
			c, err := createCertPool(b)
			if err != nil {
				global.Error(err, "create tls cert pool")
				return
			}
			fn(c)
		}
	}
}

// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
	return func(e *EnvOptionsReader) {
		vc, okc := e.GetEnvValue(nc)
		vk, okk := e.GetEnvValue(nk)
		if !okc || !okk {
			return
		}
		cert, err := e.ReadFile(vc)
		if err != nil {
			global.Error(err, "read tls client cert", "file", vc)
			return
		}
		key, err := e.ReadFile(vk)
		if err != nil {
			global.Error(err, "read tls client key", "file", vk)
			return
		}
		crt, err := tls.X509KeyPair(cert, key)
		if err != nil {
			global.Error(err, "create tls client key pair")
			return
		}
		fn(crt)
	}
}

func keyWithNamespace(ns, key string) string {
	if ns == "" {
		return key
	}
	return fmt.Sprintf("%s_%s", ns, key)
}

func stringToHeader(value string) map[string]string {
	headersPairs := strings.Split(value, ",")
	headers := make(map[string]string)

	for _, header := range headersPairs {
		n, v, found := strings.Cut(header, "=")
		if !found {
			global.Error(errors.New("missing '="), "parse headers", "input", header)
			continue
		}
		name, err := url.QueryUnescape(n)
		if err != nil {
			global.Error(err, "escape header key", "key", n)
			continue
		}
		trimmedName := strings.TrimSpace(name)
		value, err := url.QueryUnescape(v)
		if err != nil {
			global.Error(err, "escape header value", "value", v)
			continue
		}
		trimmedValue := strings.TrimSpace(value)

		headers[trimmedName] = trimmedValue
	}

	return headers
}

func createCertPool(certBytes []byte) (*x509.CertPool, error) {
	cp := x509.NewCertPool()
	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
		return nil, errors.New("failed to append certificate to the cert pool")
	}
	return cp, nil
}
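The envconfig reader above is what turns the standard OTLP environment variables into exporter settings. A hedged sketch of configuring the exporter purely through those variables follows; the variable names match the ones documented in config.go, while the endpoint, header, and timeout values are example values only.

package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
)

func main() {
	// Headers use the "k1=v1,k2=v2" form parsed by stringToHeader, and the
	// timeout is an integer number of milliseconds (see WithDuration above).
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "http://localhost:4318/v1/metrics")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_HEADERS", "api-key=secret,tenant=dev")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TIMEOUT", "5000")

	// No options passed: the values above are picked up by the env reader.
	exp, err := otlpmetrichttp.New(context.Background())
	if err != nil {
		panic(err)
	}
	_ = exp
}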
42
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"

//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go

//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go
//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go (generated, vendored, new file, 221 lines)
@@ -0,0 +1,221 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// DefaultEnvOptionsReader is the default environments reader.
|
||||
var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
|
||||
GetEnv: os.Getenv,
|
||||
ReadFile: os.ReadFile,
|
||||
Namespace: "OTEL_EXPORTER_OTLP",
|
||||
}
|
||||
|
||||
// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
|
||||
func ApplyGRPCEnvConfigs(cfg Config) Config {
|
||||
opts := getOptionsFromEnv()
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyGRPCOption(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
|
||||
func ApplyHTTPEnvConfigs(cfg Config) Config {
|
||||
opts := getOptionsFromEnv()
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyHTTPOption(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
func getOptionsFromEnv() []GenericOption {
|
||||
opts := []GenericOption{}
|
||||
|
||||
tlsConf := &tls.Config{}
|
||||
DefaultEnvOptionsReader.Apply(
|
||||
envconfig.WithURL("ENDPOINT", func(u *url.URL) {
|
||||
opts = append(opts, withEndpointScheme(u))
|
||||
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = u.Host
|
||||
// For OTLP/HTTP endpoint URLs without a per-signal
|
||||
// configuration, the passed endpoint is used as a base URL
|
||||
// and the signals are sent to these paths relative to that.
|
||||
cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
|
||||
return cfg
|
||||
}, withEndpointForGRPC(u)))
|
||||
}),
|
||||
envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
|
||||
opts = append(opts, withEndpointScheme(u))
|
||||
opts = append(opts, newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = u.Host
|
||||
// For endpoint URLs for OTLP/HTTP per-signal variables, the
|
||||
// URL MUST be used as-is without any modification. The only
|
||||
// exception is that if an URL contains no path part, the root
|
||||
// path / MUST be used.
|
||||
path := u.Path
|
||||
if path == "" {
|
||||
path = "/"
|
||||
}
|
||||
cfg.Metrics.URLPath = path
|
||||
return cfg
|
||||
}, withEndpointForGRPC(u)))
|
||||
}),
|
||||
envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
|
||||
envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
|
||||
envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
|
||||
withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
|
||||
envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||
envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
|
||||
WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
|
||||
envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
|
||||
withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
|
||||
withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }),
|
||||
)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
|
||||
return func(cfg Config) Config {
|
||||
// For OTLP/gRPC endpoints, this is the target to which the
|
||||
// exporter is going to send telemetry.
|
||||
cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
|
||||
return cfg
|
||||
}
|
||||
}
|
||||
|
||||
// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
|
||||
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if v, ok := e.GetEnvValue(n); ok {
|
||||
cp := NoCompression
|
||||
if v == "gzip" {
|
||||
cp = GzipCompression
|
||||
}
|
||||
|
||||
fn(cp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withEndpointScheme(u *url.URL) GenericOption {
|
||||
switch strings.ToLower(u.Scheme) {
|
||||
case "http", "unix":
|
||||
return WithInsecure()
|
||||
default:
|
||||
return WithSecure()
|
||||
}
|
||||
}
|
||||
|
||||
// revive:disable-next-line:flag-parameter
|
||||
func withInsecure(b bool) GenericOption {
|
||||
if b {
|
||||
return WithInsecure()
|
||||
}
|
||||
return WithSecure()
|
||||
}
|
||||
|
||||
func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if c.RootCAs != nil || len(c.Certificates) > 0 {
|
||||
fn(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if s, ok := e.GetEnvValue(n); ok {
|
||||
switch strings.ToLower(s) {
|
||||
case "cumulative":
|
||||
fn(cumulativeTemporality)
|
||||
case "delta":
|
||||
fn(deltaTemporality)
|
||||
case "lowmemory":
|
||||
fn(lowMemory)
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
|
||||
func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
|
||||
switch ik {
|
||||
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
|
||||
return metricdata.DeltaTemporality
|
||||
default:
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
}
|
||||
|
||||
func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
|
||||
switch ik {
|
||||
case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
|
||||
return metricdata.DeltaTemporality
|
||||
default:
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
}
|
||||
|
||||
func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) {
|
||||
return func(e *envconfig.EnvOptionsReader) {
|
||||
if s, ok := e.GetEnvValue(n); ok {
|
||||
switch strings.ToLower(s) {
|
||||
case "explicit_bucket_histogram":
|
||||
fn(metric.DefaultAggregationSelector)
|
||||
case "base2_exponential_bucket_histogram":
|
||||
fn(func(kind metric.InstrumentKind) metric.Aggregation {
|
||||
if kind == metric.InstrumentKindHistogram {
|
||||
return metric.AggregationBase2ExponentialHistogram{
|
||||
MaxSize: 160,
|
||||
MaxScale: 20,
|
||||
NoMinMax: false,
|
||||
}
|
||||
}
|
||||
return metric.DefaultAggregationSelector(kind)
|
||||
})
|
||||
default:
|
||||
global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
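The env reader above maps standard OTEL_EXPORTER_OTLP_* variables onto exporter options. A minimal sketch, not part of this diff, of how that path is exercised through the public otlpmetrichttp constructor; the endpoint and values below are placeholders, not taken from this change:

package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
)

func main() {
	// These variables are read by the envconfig reader shown above
	// (namespace OTEL_EXPORTER_OTLP). Values are illustrative only.
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT", "https://collector.example.com:4318/v1/metrics")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE", "delta")
	os.Setenv("OTEL_EXPORTER_OTLP_METRICS_COMPRESSION", "gzip")

	exp, err := otlpmetrichttp.New(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Shutdown flushes and releases the exporter's resources.
	defer exp.Shutdown(context.Background())
}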
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go (generated, vendored, new file, 359 lines)
@@ -0,0 +1,359 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/encoding/gzip"
|
||||
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultMaxAttempts describes how many times the driver
|
||||
// should retry the sending of the payload in case of a
|
||||
// retryable error.
|
||||
DefaultMaxAttempts int = 5
|
||||
// DefaultMetricsPath is a default URL path for endpoint that
|
||||
// receives metrics.
|
||||
DefaultMetricsPath string = "/v1/metrics"
|
||||
// DefaultBackoff is a default base backoff time used in the
|
||||
// exponential backoff strategy.
|
||||
DefaultBackoff time.Duration = 300 * time.Millisecond
|
||||
// DefaultTimeout is a default max waiting time for the backend to process
|
||||
// each span or metrics batch.
|
||||
DefaultTimeout time.Duration = 10 * time.Second
|
||||
)
|
||||
|
||||
type (
|
||||
SignalConfig struct {
|
||||
Endpoint string
|
||||
Insecure bool
|
||||
TLSCfg *tls.Config
|
||||
Headers map[string]string
|
||||
Compression Compression
|
||||
Timeout time.Duration
|
||||
URLPath string
|
||||
|
||||
// gRPC configurations
|
||||
GRPCCredentials credentials.TransportCredentials
|
||||
|
||||
TemporalitySelector metric.TemporalitySelector
|
||||
AggregationSelector metric.AggregationSelector
|
||||
}
|
||||
|
||||
Config struct {
|
||||
// Signal specific configurations
|
||||
Metrics SignalConfig
|
||||
|
||||
RetryConfig retry.Config
|
||||
|
||||
// gRPC configurations
|
||||
ReconnectionPeriod time.Duration
|
||||
ServiceConfig string
|
||||
DialOptions []grpc.DialOption
|
||||
GRPCConn *grpc.ClientConn
|
||||
}
|
||||
)
|
||||
|
||||
// NewHTTPConfig returns a new Config with all settings applied from opts and
|
||||
// any unset setting using the default HTTP config values.
|
||||
func NewHTTPConfig(opts ...HTTPOption) Config {
|
||||
cfg := Config{
|
||||
Metrics: SignalConfig{
|
||||
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
|
||||
URLPath: DefaultMetricsPath,
|
||||
Compression: NoCompression,
|
||||
Timeout: DefaultTimeout,
|
||||
|
||||
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||
AggregationSelector: metric.DefaultAggregationSelector,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
}
|
||||
cfg = ApplyHTTPEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyHTTPOption(cfg)
|
||||
}
|
||||
cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// cleanPath returns a path with all spaces trimmed and all redundancies
|
||||
// removed. If urlPath is empty or cleaning it results in an empty string,
|
||||
// defaultPath is returned instead.
|
||||
func cleanPath(urlPath string, defaultPath string) string {
|
||||
tmp := path.Clean(strings.TrimSpace(urlPath))
|
||||
if tmp == "." {
|
||||
return defaultPath
|
||||
}
|
||||
if !path.IsAbs(tmp) {
|
||||
tmp = fmt.Sprintf("/%s", tmp)
|
||||
}
|
||||
return tmp
|
||||
}
|
||||
|
||||
// NewGRPCConfig returns a new Config with all settings applied from opts and
|
||||
// any unset setting using the default gRPC config values.
|
||||
func NewGRPCConfig(opts ...GRPCOption) Config {
|
||||
userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
|
||||
cfg := Config{
|
||||
Metrics: SignalConfig{
|
||||
Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
|
||||
URLPath: DefaultMetricsPath,
|
||||
Compression: NoCompression,
|
||||
Timeout: DefaultTimeout,
|
||||
|
||||
TemporalitySelector: metric.DefaultTemporalitySelector,
|
||||
AggregationSelector: metric.DefaultAggregationSelector,
|
||||
},
|
||||
RetryConfig: retry.DefaultConfig,
|
||||
DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
|
||||
}
|
||||
cfg = ApplyGRPCEnvConfigs(cfg)
|
||||
for _, opt := range opts {
|
||||
cfg = opt.ApplyGRPCOption(cfg)
|
||||
}
|
||||
|
||||
if cfg.ServiceConfig != "" {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
|
||||
}
|
||||
// Prioritize GRPCCredentials over Insecure (passing both is an error).
|
||||
if cfg.Metrics.GRPCCredentials != nil {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
|
||||
} else if cfg.Metrics.Insecure {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
} else {
|
||||
// Default to using the host's root CA.
|
||||
creds := credentials.NewTLS(nil)
|
||||
cfg.Metrics.GRPCCredentials = creds
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
|
||||
}
|
||||
if cfg.Metrics.Compression == GzipCompression {
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
|
||||
}
|
||||
if len(cfg.DialOptions) != 0 {
|
||||
cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
|
||||
}
|
||||
if cfg.ReconnectionPeriod != 0 {
|
||||
p := grpc.ConnectParams{
|
||||
Backoff: backoff.DefaultConfig,
|
||||
MinConnectTimeout: cfg.ReconnectionPeriod,
|
||||
}
|
||||
cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
type (
|
||||
// GenericOption applies an option to the HTTP or gRPC driver.
|
||||
GenericOption interface {
|
||||
ApplyHTTPOption(Config) Config
|
||||
ApplyGRPCOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
|
||||
// HTTPOption applies an option to the HTTP driver.
|
||||
HTTPOption interface {
|
||||
ApplyHTTPOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
|
||||
// GRPCOption applies an option to the gRPC driver.
|
||||
GRPCOption interface {
|
||||
ApplyGRPCOption(Config) Config
|
||||
|
||||
// A private method to prevent users implementing the
|
||||
// interface and so future additions to it will not
|
||||
// violate compatibility.
|
||||
private()
|
||||
}
|
||||
)
|
||||
|
||||
// genericOption is an option that applies the same logic
|
||||
// for both gRPC and HTTP.
|
||||
type genericOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return g.fn(cfg)
|
||||
}
|
||||
|
||||
func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return g.fn(cfg)
|
||||
}
|
||||
|
||||
func (genericOption) private() {}
|
||||
|
||||
func newGenericOption(fn func(cfg Config) Config) GenericOption {
|
||||
return &genericOption{fn: fn}
|
||||
}
|
||||
|
||||
// splitOption is an option that applies different logic
|
||||
// for gRPC and HTTP.
|
||||
type splitOption struct {
|
||||
httpFn func(Config) Config
|
||||
grpcFn func(Config) Config
|
||||
}
|
||||
|
||||
func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return g.grpcFn(cfg)
|
||||
}
|
||||
|
||||
func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return g.httpFn(cfg)
|
||||
}
|
||||
|
||||
func (splitOption) private() {}
|
||||
|
||||
func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
|
||||
return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
|
||||
}
|
||||
|
||||
// httpOption is an option that is only applied to the HTTP driver.
|
||||
type httpOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
|
||||
return h.fn(cfg)
|
||||
}
|
||||
|
||||
func (httpOption) private() {}
|
||||
|
||||
func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
|
||||
return &httpOption{fn: fn}
|
||||
}
|
||||
|
||||
// grpcOption is an option that is only applied to the gRPC driver.
|
||||
type grpcOption struct {
|
||||
fn func(Config) Config
|
||||
}
|
||||
|
||||
func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
|
||||
return h.fn(cfg)
|
||||
}
|
||||
|
||||
func (grpcOption) private() {}
|
||||
|
||||
func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
|
||||
return &grpcOption{fn: fn}
|
||||
}
|
||||
|
||||
// Generic Options
|
||||
|
||||
func WithEndpoint(endpoint string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Endpoint = endpoint
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithCompression(compression Compression) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Compression = compression
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithURLPath(urlPath string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.URLPath = urlPath
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithRetry(rc retry.Config) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.RetryConfig = rc
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
|
||||
return newSplitOption(func(cfg Config) Config {
|
||||
cfg.Metrics.TLSCfg = tlsCfg.Clone()
|
||||
return cfg
|
||||
}, func(cfg Config) Config {
|
||||
cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithInsecure() GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Insecure = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithSecure() GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Insecure = false
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithHeaders(headers map[string]string) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Headers = headers
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTimeout(duration time.Duration) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.Timeout = duration
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.TemporalitySelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
|
||||
return newGenericOption(func(cfg Config) Config {
|
||||
cfg.Metrics.AggregationSelector = selector
|
||||
return cfg
|
||||
})
|
||||
}
|
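NewHTTPConfig applies defaults first, then the OTEL_EXPORTER_OTLP_* environment variables, then explicit options, so an explicit option wins. A small sketch of that layering; oconf is an internal package, so this only compiles from inside the otlpmetrichttp module (for example as a test next to the package), and the values are assumptions for illustration:

package oconf_test

import (
	"testing"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
)

func TestOptionLayering(t *testing.T) {
	// Explicit options are applied after defaults and env vars,
	// so WithTimeout takes precedence here.
	cfg := oconf.NewHTTPConfig(
		oconf.WithTimeout(5*time.Second),
		oconf.WithCompression(oconf.GzipCompression),
	)
	if cfg.Metrics.Timeout != 5*time.Second {
		t.Fatalf("unexpected timeout: %v", cfg.Metrics.Timeout)
	}
}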
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go (generated, vendored, new file, 58 lines)
@@ -0,0 +1,58 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
|
||||
|
||||
import "time"
|
||||
|
||||
const (
|
||||
// DefaultCollectorGRPCPort is the default gRPC port of the collector.
|
||||
DefaultCollectorGRPCPort uint16 = 4317
|
||||
// DefaultCollectorHTTPPort is the default HTTP port of the collector.
|
||||
DefaultCollectorHTTPPort uint16 = 4318
|
||||
// DefaultCollectorHost is the host address the Exporter will attempt
|
||||
// to connect to if no collector address is provided.
|
||||
DefaultCollectorHost string = "localhost"
|
||||
)
|
||||
|
||||
// Compression describes the compression used for payloads sent to the
|
||||
// collector.
|
||||
type Compression int
|
||||
|
||||
const (
|
||||
// NoCompression tells the driver to send payloads without
|
||||
// compression.
|
||||
NoCompression Compression = iota
|
||||
// GzipCompression tells the driver to send payloads after
|
||||
// compressing them with gzip.
|
||||
GzipCompression
|
||||
)
|
||||
|
||||
// RetrySettings defines configuration for retrying batches in case of export failure
|
||||
// using an exponential backoff.
|
||||
type RetrySettings struct {
|
||||
// Enabled indicates whether or not to retry sending batches in case of export failure.
|
||||
Enabled bool
|
||||
// InitialInterval the time to wait after the first failure before retrying.
|
||||
InitialInterval time.Duration
|
||||
// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
|
||||
// consecutive retries will always be `MaxInterval`.
|
||||
MaxInterval time.Duration
|
||||
// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
|
||||
// Once this value is reached, the data is discarded.
|
||||
MaxElapsedTime time.Duration
|
||||
}
|
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go (generated, vendored, new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ReadTLSConfigFromFile reads a PEM certificate file and creates
|
||||
// a tls.Config that will use this certificate to verify a server certificate.
|
||||
func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
|
||||
b, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return CreateTLSConfig(b)
|
||||
}
|
||||
|
||||
// CreateTLSConfig creates a tls.Config from a raw certificate bytes
|
||||
// to verify a server certificate.
|
||||
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
|
||||
cp := x509.NewCertPool()
|
||||
if ok := cp.AppendCertsFromPEM(certBytes); !ok {
|
||||
return nil, errors.New("failed to append certificate to the cert pool")
|
||||
}
|
||||
|
||||
return &tls.Config{
|
||||
RootCAs: cp,
|
||||
}, nil
|
||||
}
|
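The same pool-building steps can be done with the standard library and handed to the exporter through its public TLS option. A hedged sketch, not part of this diff; "ca.pem" is a placeholder path:

package main

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
)

func main() {
	// Mirror CreateTLSConfig above: read a PEM CA and build a cert pool.
	pem, err := os.ReadFile("ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("failed to append certificate to the cert pool")
	}
	exp, err := otlpmetrichttp.New(context.Background(),
		otlpmetrichttp.WithTLSClientConfig(&tls.Config{RootCAs: pool}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = exp
}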
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go (generated, vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/partialsuccess.go
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
|
||||
|
||||
import "fmt"
|
||||
|
||||
// PartialSuccess represents the underlying error for all handling of
|
||||
// OTLP partial success messages. Use `errors.Is(err,
|
||||
// PartialSuccess{})` to test whether an error passed to the OTel
|
||||
// error handler belongs to this category.
|
||||
type PartialSuccess struct {
|
||||
ErrorMessage string
|
||||
RejectedItems int64
|
||||
RejectedKind string
|
||||
}
|
||||
|
||||
var _ error = PartialSuccess{}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (ps PartialSuccess) Error() string {
|
||||
msg := ps.ErrorMessage
|
||||
if msg == "" {
|
||||
msg = "empty message"
|
||||
}
|
||||
return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
|
||||
}
|
||||
|
||||
// Is supports the errors.Is() interface.
|
||||
func (ps PartialSuccess) Is(err error) bool {
|
||||
_, ok := err.(PartialSuccess)
|
||||
return ok
|
||||
}
|
||||
|
||||
// TracePartialSuccessError returns an error describing a partial success
|
||||
// response for the trace signal.
|
||||
func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||
return PartialSuccess{
|
||||
ErrorMessage: errorMessage,
|
||||
RejectedItems: itemsRejected,
|
||||
RejectedKind: "spans",
|
||||
}
|
||||
}
|
||||
|
||||
// MetricPartialSuccessError returns an error describing a partial success
|
||||
// response for the metric signal.
|
||||
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
|
||||
return PartialSuccess{
|
||||
ErrorMessage: errorMessage,
|
||||
RejectedItems: itemsRejected,
|
||||
RejectedKind: "metric data points",
|
||||
}
|
||||
}
|
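A short sketch of how this error category is matched with errors.Is and errors.As; the internal package can only be imported from within this module, so the example is written as a test alongside it and the rejected count is a made-up value:

package internal_test

import (
	"errors"
	"testing"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
)

func TestPartialSuccessCategory(t *testing.T) {
	err := internal.MetricPartialSuccessError(3, "3 data points were rejected")

	// errors.Is matches the category via PartialSuccess.Is above.
	if !errors.Is(err, internal.PartialSuccess{}) {
		t.Fatal("expected a PartialSuccess error")
	}

	// errors.As recovers the structured fields.
	var ps internal.PartialSuccess
	if errors.As(err, &ps) && ps.RejectedItems != 3 {
		t.Fatalf("unexpected rejected items: %d", ps.RejectedItems)
	}
}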
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go (generated, vendored, new file, 156 lines)
@@ -0,0 +1,156 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/retry/retry.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package retry provides request retry functionality that can perform
|
||||
// configurable exponential backoff for transient errors and honor any
|
||||
// explicit throttle responses received.
|
||||
package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
)
|
||||
|
||||
// DefaultConfig are the recommended defaults to use.
|
||||
var DefaultConfig = Config{
|
||||
Enabled: true,
|
||||
InitialInterval: 5 * time.Second,
|
||||
MaxInterval: 30 * time.Second,
|
||||
MaxElapsedTime: time.Minute,
|
||||
}
|
||||
|
||||
// Config defines configuration for retrying batches in case of export failure
|
||||
// using an exponential backoff.
|
||||
type Config struct {
|
||||
// Enabled indicates whether or not to retry sending batches in case of
|
||||
// export failure.
|
||||
Enabled bool
|
||||
// InitialInterval the time to wait after the first failure before
|
||||
// retrying.
|
||||
InitialInterval time.Duration
|
||||
// MaxInterval is the upper bound on backoff interval. Once this value is
|
||||
// reached the delay between consecutive retries will always be
|
||||
// `MaxInterval`.
|
||||
MaxInterval time.Duration
|
||||
// MaxElapsedTime is the maximum amount of time (including retries) spent
|
||||
// trying to send a request/batch. Once this value is reached, the data
|
||||
// is discarded.
|
||||
MaxElapsedTime time.Duration
|
||||
}
|
||||
|
||||
// RequestFunc wraps a request with retry logic.
|
||||
type RequestFunc func(context.Context, func(context.Context) error) error
|
||||
|
||||
// EvaluateFunc returns if an error is retry-able and if an explicit throttle
|
||||
// duration should be honored that was included in the error.
|
||||
//
|
||||
// The function must return true if the error argument is retry-able,
|
||||
// otherwise it must return false for the first return parameter.
|
||||
//
|
||||
// The function must return a non-zero time.Duration if the error contains
|
||||
// explicit throttle duration that should be honored, otherwise it must return
|
||||
// a zero valued time.Duration.
|
||||
type EvaluateFunc func(error) (bool, time.Duration)
|
||||
|
||||
// RequestFunc returns a RequestFunc using the evaluate function to determine
|
||||
// if requests can be retried, based on the exponential backoff
|
||||
// configuration of c.
|
||||
func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
|
||||
if !c.Enabled {
|
||||
return func(ctx context.Context, fn func(context.Context) error) error {
|
||||
return fn(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, fn func(context.Context) error) error {
|
||||
// Do not use NewExponentialBackOff since it calls Reset and the code here
|
||||
// must call Reset after changing the InitialInterval (this saves an
|
||||
// unnecessary call to Now).
|
||||
b := &backoff.ExponentialBackOff{
|
||||
InitialInterval: c.InitialInterval,
|
||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||
Multiplier: backoff.DefaultMultiplier,
|
||||
MaxInterval: c.MaxInterval,
|
||||
MaxElapsedTime: c.MaxElapsedTime,
|
||||
Stop: backoff.Stop,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
|
||||
for {
|
||||
err := fn(ctx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
retryable, throttle := evaluate(err)
|
||||
if !retryable {
|
||||
return err
|
||||
}
|
||||
|
||||
bOff := b.NextBackOff()
|
||||
if bOff == backoff.Stop {
|
||||
return fmt.Errorf("max retry time elapsed: %w", err)
|
||||
}
|
||||
|
||||
// Wait for the greater of the backoff or throttle delay.
|
||||
var delay time.Duration
|
||||
if bOff > throttle {
|
||||
delay = bOff
|
||||
} else {
|
||||
elapsed := b.GetElapsedTime()
|
||||
if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
|
||||
return fmt.Errorf("max retry time would elapse: %w", err)
|
||||
}
|
||||
delay = throttle
|
||||
}
|
||||
|
||||
if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
|
||||
return fmt.Errorf("%w: %s", ctxErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow override for testing.
|
||||
var waitFunc = wait
|
||||
|
||||
// wait takes the caller's context, and the amount of time to wait. It will
|
||||
// return nil if the timer fires before or at the same time as the context's
|
||||
// deadline. This indicates that the call can be retried.
|
||||
func wait(ctx context.Context, delay time.Duration) error {
|
||||
timer := time.NewTimer(delay)
|
||||
defer timer.Stop()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Handle the case where the timer and context deadline end
|
||||
// simultaneously by prioritizing the timer expiration nil value
|
||||
// response.
|
||||
select {
|
||||
case <-timer.C:
|
||||
default:
|
||||
return ctx.Err()
|
||||
}
|
||||
case <-timer.C:
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
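A sketch of wrapping an export call with the retry policy above; the retry package is internal, so this only compiles from within the module, and the evaluate function and intervals are illustrative assumptions rather than values from this diff:

package retry_test

import (
	"context"
	"errors"
	"testing"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
)

var errTemporary = errors.New("temporary")

func TestRetryWrapsRequest(t *testing.T) {
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 10 * time.Millisecond,
		MaxInterval:     100 * time.Millisecond,
		MaxElapsedTime:  time.Second,
	}

	// Only errTemporary is retryable; no explicit throttle hint.
	request := cfg.RequestFunc(func(err error) (bool, time.Duration) {
		return errors.Is(err, errTemporary), 0
	})

	attempts := 0
	err := request(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errTemporary // retried with exponential backoff
		}
		return nil
	})
	if err != nil || attempts != 3 {
		t.Fatalf("err=%v attempts=%d", err, attempts)
	}
}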
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go (generated, vendored, new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
|
||||
|
||||
import (
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
)
|
||||
|
||||
// AttrIter transforms an attribute iterator into OTLP key-values.
|
||||
func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
|
||||
l := iter.Len()
|
||||
if l == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, l)
|
||||
for iter.Next() {
|
||||
out = append(out, KeyValue(iter.Attribute()))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
|
||||
func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
out := make([]*cpb.KeyValue, 0, len(attrs))
|
||||
for _, kv := range attrs {
|
||||
out = append(out, KeyValue(kv))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// KeyValue transforms an attribute KeyValue into an OTLP key-value.
|
||||
func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
|
||||
return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
|
||||
}
|
||||
|
||||
// Value transforms an attribute Value into an OTLP AnyValue.
|
||||
func Value(v attribute.Value) *cpb.AnyValue {
|
||||
av := new(cpb.AnyValue)
|
||||
switch v.Type() {
|
||||
case attribute.BOOL:
|
||||
av.Value = &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v.AsBool(),
|
||||
}
|
||||
case attribute.BOOLSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: boolSliceValues(v.AsBoolSlice()),
|
||||
},
|
||||
}
|
||||
case attribute.INT64:
|
||||
av.Value = &cpb.AnyValue_IntValue{
|
||||
IntValue: v.AsInt64(),
|
||||
}
|
||||
case attribute.INT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: int64SliceValues(v.AsInt64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
av.Value = &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v.AsFloat64(),
|
||||
}
|
||||
case attribute.FLOAT64SLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: float64SliceValues(v.AsFloat64Slice()),
|
||||
},
|
||||
}
|
||||
case attribute.STRING:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: v.AsString(),
|
||||
}
|
||||
case attribute.STRINGSLICE:
|
||||
av.Value = &cpb.AnyValue_ArrayValue{
|
||||
ArrayValue: &cpb.ArrayValue{
|
||||
Values: stringSliceValues(v.AsStringSlice()),
|
||||
},
|
||||
}
|
||||
default:
|
||||
av.Value = &cpb.AnyValue_StringValue{
|
||||
StringValue: "INVALID",
|
||||
}
|
||||
}
|
||||
return av
|
||||
}
|
||||
|
||||
func boolSliceValues(vals []bool) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_BoolValue{
|
||||
BoolValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func int64SliceValues(vals []int64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_IntValue{
|
||||
IntValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func float64SliceValues(vals []float64) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_DoubleValue{
|
||||
DoubleValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func stringSliceValues(vals []string) []*cpb.AnyValue {
|
||||
converted := make([]*cpb.AnyValue, len(vals))
|
||||
for i, v := range vals {
|
||||
converted[i] = &cpb.AnyValue{
|
||||
Value: &cpb.AnyValue_StringValue{
|
||||
StringValue: v,
|
||||
},
|
||||
}
|
||||
}
|
||||
return converted
|
||||
}
|
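A brief sketch of the attribute helpers in use: SDK attribute.KeyValue values become OTLP KeyValue/AnyValue pairs. As above, transform is internal, so this only compiles inside the module; the attribute names and values are placeholders:

package transform_test

import (
	"testing"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
)

func TestKeyValuesTransform(t *testing.T) {
	kvs := transform.KeyValues([]attribute.KeyValue{
		attribute.String("service.name", "checkout"),
		attribute.Int("retries", 2),
		attribute.BoolSlice("flags", []bool{true, false}),
	})
	if len(kvs) != 3 {
		t.Fatalf("expected 3 key-values, got %d", len(kvs))
	}
	// Each SDK value maps onto the matching AnyValue variant.
	if kvs[1].GetValue().GetIntValue() != 2 {
		t.Fatal("unexpected int value")
	}
}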
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go (generated, vendored, new file, 114 lines)
@@ -0,0 +1,114 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errUnknownAggregation = errors.New("unknown aggregation")
|
||||
errUnknownTemporality = errors.New("unknown temporality")
|
||||
)
|
||||
|
||||
type errMetric struct {
|
||||
m *mpb.Metric
|
||||
err error
|
||||
}
|
||||
|
||||
func (e errMetric) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
func (e errMetric) Error() string {
|
||||
format := "invalid metric (name: %q, description: %q, unit: %q): %s"
|
||||
return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
|
||||
}
|
||||
|
||||
func (e errMetric) Is(target error) bool {
|
||||
return errors.Is(e.err, target)
|
||||
}
|
||||
|
||||
// multiErr is used by the data-type transform functions to wrap multiple
|
||||
// errors into a single return value. The error message will show all errors
|
||||
// as a list and scope them by the datatype name that is returning them.
|
||||
type multiErr struct {
|
||||
datatype string
|
||||
errs []error
|
||||
}
|
||||
|
||||
// errOrNil returns nil if e contains no errors, otherwise it returns e.
|
||||
func (e *multiErr) errOrNil() error {
|
||||
if len(e.errs) == 0 {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// append adds err to e. If err is a multiErr, its errs are flattened into e.
|
||||
func (e *multiErr) append(err error) {
|
||||
// Do not use errors.As here; this should only be flattened one layer. If
|
||||
// there is a *multiErr several steps down the chain, all the errors above
|
||||
// it will be discarded if errors.As is used instead.
|
||||
switch other := err.(type) {
|
||||
case *multiErr:
|
||||
// Flatten err errors into e.
|
||||
e.errs = append(e.errs, other.errs...)
|
||||
default:
|
||||
e.errs = append(e.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *multiErr) Error() string {
|
||||
es := make([]string, len(e.errs))
|
||||
for i, err := range e.errs {
|
||||
es[i] = fmt.Sprintf("* %s", err)
|
||||
}
|
||||
|
||||
format := "%d errors occurred transforming %s:\n\t%s"
|
||||
return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
|
||||
}
|
||||
|
||||
func (e *multiErr) Unwrap() error {
|
||||
switch len(e.errs) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return e.errs[0]
|
||||
}
|
||||
|
||||
// Return a multiErr without the leading error.
|
||||
cp := &multiErr{
|
||||
datatype: e.datatype,
|
||||
errs: make([]error, len(e.errs)-1),
|
||||
}
|
||||
copy(cp.errs, e.errs[1:])
|
||||
return cp
|
||||
}
|
||||
|
||||
func (e *multiErr) Is(target error) bool {
|
||||
if len(e.errs) == 0 {
|
||||
return false
|
||||
}
|
||||
// Check if the first error is target.
|
||||
return errors.Is(e.errs[0], target)
|
||||
}
|
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go (generated, vendored, new file, 292 lines)
@@ -0,0 +1,292 @@
|
||||
// Code created by gotmpl. DO NOT MODIFY.
|
||||
// source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl
|
||||
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package transform provides transformation functionality from the
|
||||
// sdk/metric/metricdata data-types into OTLP data-types.
|
||||
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
cpb "go.opentelemetry.io/proto/otlp/common/v1"
|
||||
mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
rpb "go.opentelemetry.io/proto/otlp/resource/v1"
|
||||
)
|
||||
|
||||
// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
|
||||
// contains invalid ScopeMetrics, an error will be returned along with an OTLP
|
||||
// ResourceMetrics that contains partial OTLP ScopeMetrics.
|
||||
func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
|
||||
sms, err := ScopeMetrics(rm.ScopeMetrics)
|
||||
return &mpb.ResourceMetrics{
|
||||
Resource: &rpb.Resource{
|
||||
Attributes: AttrIter(rm.Resource.Iter()),
|
||||
},
|
||||
ScopeMetrics: sms,
|
||||
SchemaUrl: rm.Resource.SchemaURL(),
|
||||
}, err
|
||||
}
|
||||
|
||||
// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
|
||||
// sms contains invalid metric values, an error will be returned along with a
|
||||
// slice that contains partial OTLP ScopeMetrics.
|
||||
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
|
||||
errs := &multiErr{datatype: "ScopeMetrics"}
|
||||
out := make([]*mpb.ScopeMetrics, 0, len(sms))
|
||||
for _, sm := range sms {
|
||||
ms, err := Metrics(sm.Metrics)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
|
||||
out = append(out, &mpb.ScopeMetrics{
|
||||
Scope: &cpb.InstrumentationScope{
|
||||
Name: sm.Scope.Name,
|
||||
Version: sm.Scope.Version,
|
||||
},
|
||||
Metrics: ms,
|
||||
SchemaUrl: sm.Scope.SchemaURL,
|
||||
})
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
|
||||
// invalid metric values, an error will be returned along with a slice that
|
||||
// contains partial OTLP Metrics.
|
||||
func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
|
||||
errs := &multiErr{datatype: "Metrics"}
|
||||
out := make([]*mpb.Metric, 0, len(ms))
|
||||
for _, m := range ms {
|
||||
o, err := metric(m)
|
||||
if err != nil {
|
||||
// Do not include invalid data. Drop the metric, report the error.
|
||||
errs.append(errMetric{m: o, err: err})
|
||||
continue
|
||||
}
|
||||
out = append(out, o)
|
||||
}
|
||||
return out, errs.errOrNil()
|
||||
}
|
||||
|
||||
func metric(m metricdata.Metrics) (*mpb.Metric, error) {
|
||||
var err error
|
||||
out := &mpb.Metric{
|
||||
Name: m.Name,
|
||||
Description: m.Description,
|
||||
Unit: string(m.Unit),
|
||||
}
|
||||
switch a := m.Data.(type) {
|
||||
case metricdata.Gauge[int64]:
|
||||
out.Data = Gauge[int64](a)
|
||||
case metricdata.Gauge[float64]:
|
||||
out.Data = Gauge[float64](a)
|
||||
case metricdata.Sum[int64]:
|
||||
out.Data, err = Sum[int64](a)
|
||||
case metricdata.Sum[float64]:
|
||||
out.Data, err = Sum[float64](a)
|
||||
case metricdata.Histogram[int64]:
|
||||
out.Data, err = Histogram(a)
|
||||
case metricdata.Histogram[float64]:
|
||||
out.Data, err = Histogram(a)
|
||||
case metricdata.ExponentialHistogram[int64]:
|
||||
out.Data, err = ExponentialHistogram(a)
|
||||
case metricdata.ExponentialHistogram[float64]:
|
||||
out.Data, err = ExponentialHistogram(a)
|
||||
default:
|
||||
return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Gauge returns an OTLP Metric_Gauge generated from g.
|
||||
func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
|
||||
return &mpb.Metric_Gauge{
|
||||
Gauge: &mpb.Gauge{
|
||||
DataPoints: DataPoints(g.DataPoints),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Sum returns an OTLP Metric_Sum generated from s. An error is returned
|
||||
// if the temporality of s is unknown.
|
||||
func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
|
||||
t, err := Temporality(s.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Sum{
|
||||
Sum: &mpb.Sum{
|
||||
AggregationTemporality: t,
|
||||
IsMonotonic: s.IsMonotonic,
|
||||
DataPoints: DataPoints(s.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
|
||||
func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
|
||||
out := make([]*mpb.NumberDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
ndp := &mpb.NumberDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||
}
|
||||
switch v := any(dPt.Value).(type) {
|
||||
case int64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsInt{
|
||||
AsInt: v,
|
||||
}
|
||||
case float64:
|
||||
ndp.Value = &mpb.NumberDataPoint_AsDouble{
|
||||
AsDouble: v,
|
||||
}
|
||||
}
|
||||
out = append(out, ndp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Histogram returns an OTLP Metric_Histogram generated from h. An error is
|
||||
// returned if the temporality of h is unknown.
|
||||
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_Histogram{
|
||||
Histogram: &mpb.Histogram{
|
||||
AggregationTemporality: t,
|
||||
DataPoints: HistogramDataPoints(h.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
|
||||
// from dPts.
|
||||
func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
|
||||
out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
sum := float64(dPt.Sum)
|
||||
hdp := &mpb.HistogramDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||
Count: dPt.Count,
|
||||
Sum: &sum,
|
||||
BucketCounts: dPt.BucketCounts,
|
||||
ExplicitBounds: dPt.Bounds,
|
||||
}
|
||||
if v, ok := dPt.Min.Value(); ok {
|
||||
vF64 := float64(v)
|
||||
hdp.Min = &vF64
|
||||
}
|
||||
if v, ok := dPt.Max.Value(); ok {
|
||||
vF64 := float64(v)
|
||||
hdp.Max = &vF64
|
||||
}
|
||||
out = append(out, hdp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
|
||||
// returned if the temporality of h is unknown.
|
||||
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
|
||||
t, err := Temporality(h.Temporality)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &mpb.Metric_ExponentialHistogram{
|
||||
ExponentialHistogram: &mpb.ExponentialHistogram{
|
||||
AggregationTemporality: t,
|
||||
DataPoints: ExponentialHistogramDataPoints(h.DataPoints),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
|
||||
// from dPts.
|
||||
func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
|
||||
out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
|
||||
for _, dPt := range dPts {
|
||||
sum := float64(dPt.Sum)
|
||||
ehdp := &mpb.ExponentialHistogramDataPoint{
|
||||
Attributes: AttrIter(dPt.Attributes.Iter()),
|
||||
StartTimeUnixNano: timeUnixNano(dPt.StartTime),
|
||||
TimeUnixNano: timeUnixNano(dPt.Time),
|
||||
Count: dPt.Count,
|
||||
Sum: &sum,
|
||||
Scale: dPt.Scale,
|
||||
ZeroCount: dPt.ZeroCount,
|
||||
|
||||
Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
|
||||
Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
|
||||
}
|
||||
if v, ok := dPt.Min.Value(); ok {
|
||||
vF64 := float64(v)
|
||||
ehdp.Min = &vF64
|
||||
}
|
||||
if v, ok := dPt.Max.Value(); ok {
|
||||
vF64 := float64(v)
|
||||
ehdp.Max = &vF64
|
||||
}
|
||||
out = append(out, ehdp)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
|
||||
// from bucket.
|
||||
func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
|
||||
return &mpb.ExponentialHistogramDataPoint_Buckets{
|
||||
Offset: bucket.Offset,
|
||||
BucketCounts: bucket.Counts,
|
||||
}
|
||||
}
|
||||
|
||||
// Temporality returns an OTLP AggregationTemporality generated from t. If t
|
||||
// is unknown, an error is returned along with the invalid
|
||||
// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
|
||||
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
|
||||
switch t {
|
||||
case metricdata.DeltaTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
|
||||
case metricdata.CumulativeTemporality:
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
|
||||
default:
|
||||
err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
|
||||
return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
|
||||
}
|
||||
}
|
||||
|
||||
// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
|
||||
// since January 1, 1970 UTC as uint64.
|
||||
// The result is undefined if the Unix time
|
||||
// in nanoseconds cannot be represented by an int64
|
||||
// (a date before the year 1678 or after 2262).
|
||||
// timeUnixNano on the zero Time returns 0.
|
||||
// The result does not depend on the location associated with t.
|
||||
func timeUnixNano(t time.Time) uint64 {
|
||||
if t.IsZero() {
|
||||
return 0
|
||||
}
|
||||
return uint64(t.UnixNano())
|
||||
}
|
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/version.go
generated
vendored
Normal file
20
vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/version.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
|
||||
|
||||
// Version is the current release version of the OpenTelemetry OTLP metrics exporter in use.
|
||||
func Version() string {
|
||||
return "0.42.0"
|
||||
}
|
201
vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/otel/exporters/prometheus/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
153
vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
generated
vendored
Normal file
153
vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
)
|
||||
|
||||
// config contains options for the exporter.
|
||||
type config struct {
|
||||
registerer prometheus.Registerer
|
||||
disableTargetInfo bool
|
||||
withoutUnits bool
|
||||
withoutCounterSuffixes bool
|
||||
readerOpts []metric.ManualReaderOption
|
||||
disableScopeInfo bool
|
||||
namespace string
|
||||
}
|
||||
|
||||
// newConfig creates a validated config configured with options.
|
||||
func newConfig(opts ...Option) config {
|
||||
cfg := config{}
|
||||
for _, opt := range opts {
|
||||
cfg = opt.apply(cfg)
|
||||
}
|
||||
|
||||
if cfg.registerer == nil {
|
||||
cfg.registerer = prometheus.DefaultRegisterer
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
// Option sets exporter option values.
|
||||
type Option interface {
|
||||
apply(config) config
|
||||
}
|
||||
|
||||
type optionFunc func(config) config
|
||||
|
||||
func (fn optionFunc) apply(cfg config) config {
|
||||
return fn(cfg)
|
||||
}
|
||||
|
||||
// WithRegisterer configures which prometheus Registerer the Exporter will
|
||||
// register with. If no registerer is used the prometheus DefaultRegisterer is
|
||||
// used.
|
||||
func WithRegisterer(reg prometheus.Registerer) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.registerer = reg
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithAggregationSelector configure the Aggregation Selector the exporter will
|
||||
// use. If no AggregationSelector is provided the DefaultAggregationSelector is
|
||||
// used.
|
||||
func WithAggregationSelector(agg metric.AggregationSelector) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.readerOpts = append(cfg.readerOpts, metric.WithAggregationSelector(agg))
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithProducer configure the metric Producer the exporter will use as a source
|
||||
// of external metric data.
|
||||
func WithProducer(producer metric.Producer) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.readerOpts = append(cfg.readerOpts, metric.WithProducer(producer))
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithoutTargetInfo configures the Exporter to not export the resource target_info metric.
|
||||
// If not specified, the Exporter will create a target_info metric containing
|
||||
// the metrics' resource.Resource attributes.
|
||||
func WithoutTargetInfo() Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.disableTargetInfo = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithoutUnits disables exporter's addition of unit suffixes to metric names,
|
||||
// and will also prevent unit comments from being added in OpenMetrics once
|
||||
// unit comments are supported.
|
||||
//
|
||||
// By default, metric names include a unit suffix to follow Prometheus naming
|
||||
// conventions. For example, the counter metric request.duration, with unit
|
||||
// milliseconds would become request_duration_milliseconds_total.
|
||||
// With this option set, the name would instead be request_duration_total.
|
||||
func WithoutUnits() Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.withoutUnits = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithoutUnits disables exporter's addition _total suffixes on counters.
|
||||
//
|
||||
// By default, metric names include a _total suffix to follow Prometheus naming
|
||||
// conventions. For example, the counter metric happy.people would become
|
||||
// happy_people_total. With this option set, the name would instead be
|
||||
// happy_people.
|
||||
func WithoutCounterSuffixes() Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.withoutCounterSuffixes = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric.
|
||||
// If not specified, the Exporter will create a otel_scope_info metric containing
|
||||
// the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points.
|
||||
func WithoutScopeInfo() Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
cfg.disableScopeInfo = true
|
||||
return cfg
|
||||
})
|
||||
}
|
||||
|
||||
// WithNamespace configures the Exporter to prefix metric with the given namespace.
|
||||
// Metadata metrics such as target_info and otel_scope_info are not prefixed since these
|
||||
// have special behavior based on their name.
|
||||
func WithNamespace(ns string) Option {
|
||||
return optionFunc(func(cfg config) config {
|
||||
ns = sanitizeName(ns)
|
||||
if !strings.HasSuffix(ns, "_") {
|
||||
// namespace and metric names should be separated with an underscore,
|
||||
// adds a trailing underscore if there is not one already.
|
||||
ns = ns + "_"
|
||||
}
|
||||
|
||||
cfg.namespace = ns
|
||||
return cfg
|
||||
})
|
||||
}
|
18
vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
generated
vendored
Normal file
18
vendor/go.opentelemetry.io/otel/exporters/prometheus/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package prometheus provides a Prometheus Exporter that converts
|
||||
// OTLP metrics into the Prometheus exposition format and implements
|
||||
// prometheus.Collector to provide a handler for these metrics.
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
533
vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
generated
vendored
Normal file
533
vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go
generated
vendored
Normal file
@@ -0,0 +1,533 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
const (
|
||||
targetInfoMetricName = "target_info"
|
||||
targetInfoDescription = "Target metadata"
|
||||
|
||||
scopeInfoMetricName = "otel_scope_info"
|
||||
scopeInfoDescription = "Instrumentation Scope metadata"
|
||||
)
|
||||
|
||||
var (
|
||||
scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"}
|
||||
|
||||
errScopeInvalid = errors.New("invalid scope")
|
||||
)
|
||||
|
||||
// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
|
||||
// interface for easy instantiation with a MeterProvider.
|
||||
type Exporter struct {
|
||||
metric.Reader
|
||||
}
|
||||
|
||||
// MarshalLog returns logging data about the Exporter.
|
||||
func (e *Exporter) MarshalLog() interface{} {
|
||||
const t = "Prometheus exporter"
|
||||
|
||||
if r, ok := e.Reader.(*metric.ManualReader); ok {
|
||||
under := r.MarshalLog()
|
||||
if data, ok := under.(struct {
|
||||
Type string
|
||||
Registered bool
|
||||
Shutdown bool
|
||||
}); ok {
|
||||
data.Type = t
|
||||
return data
|
||||
}
|
||||
}
|
||||
|
||||
return struct{ Type string }{Type: t}
|
||||
}
|
||||
|
||||
var _ metric.Reader = &Exporter{}
|
||||
|
||||
// collector is used to implement prometheus.Collector.
|
||||
type collector struct {
|
||||
reader metric.Reader
|
||||
|
||||
withoutUnits bool
|
||||
withoutCounterSuffixes bool
|
||||
disableScopeInfo bool
|
||||
namespace string
|
||||
|
||||
mu sync.Mutex // mu protects all members below from the concurrent access.
|
||||
disableTargetInfo bool
|
||||
targetInfo prometheus.Metric
|
||||
scopeInfos map[instrumentation.Scope]prometheus.Metric
|
||||
scopeInfosInvalid map[instrumentation.Scope]struct{}
|
||||
metricFamilies map[string]*dto.MetricFamily
|
||||
}
|
||||
|
||||
// prometheus counters MUST have a _total suffix by default:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md
|
||||
const counterSuffix = "_total"
|
||||
|
||||
// New returns a Prometheus Exporter.
|
||||
func New(opts ...Option) (*Exporter, error) {
|
||||
cfg := newConfig(opts...)
|
||||
|
||||
// this assumes that the default temporality selector will always return cumulative.
|
||||
// we only support cumulative temporality, so building our own reader enforces this.
|
||||
// TODO (#3244): Enable some way to configure the reader, but not change temporality.
|
||||
reader := metric.NewManualReader(cfg.readerOpts...)
|
||||
|
||||
collector := &collector{
|
||||
reader: reader,
|
||||
disableTargetInfo: cfg.disableTargetInfo,
|
||||
withoutUnits: cfg.withoutUnits,
|
||||
withoutCounterSuffixes: cfg.withoutCounterSuffixes,
|
||||
disableScopeInfo: cfg.disableScopeInfo,
|
||||
scopeInfos: make(map[instrumentation.Scope]prometheus.Metric),
|
||||
scopeInfosInvalid: make(map[instrumentation.Scope]struct{}),
|
||||
metricFamilies: make(map[string]*dto.MetricFamily),
|
||||
namespace: cfg.namespace,
|
||||
}
|
||||
|
||||
if err := cfg.registerer.Register(collector); err != nil {
|
||||
return nil, fmt.Errorf("cannot register the collector: %w", err)
|
||||
}
|
||||
|
||||
e := &Exporter{
|
||||
Reader: reader,
|
||||
}
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// Describe implements prometheus.Collector.
|
||||
func (c *collector) Describe(ch chan<- *prometheus.Desc) {
|
||||
// The Opentelemetry SDK doesn't have information on which will exist when the collector
|
||||
// is registered. By returning nothing we are an "unchecked" collector in Prometheus,
|
||||
// and assume responsibility for consistency of the metrics produced.
|
||||
//
|
||||
// See https://pkg.go.dev/github.com/prometheus/client_golang@v1.13.0/prometheus#hdr-Custom_Collectors_and_constant_Metrics
|
||||
}
|
||||
|
||||
// Collect implements prometheus.Collector.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (c *collector) Collect(ch chan<- prometheus.Metric) {
|
||||
// TODO (#3047): Use a sync.Pool instead of allocating metrics every Collect.
|
||||
metrics := metricdata.ResourceMetrics{}
|
||||
err := c.reader.Collect(context.TODO(), &metrics)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
if err == metric.ErrReaderNotRegistered {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
global.Debug("Prometheus exporter export", "Data", metrics)
|
||||
|
||||
// Initialize (once) targetInfo and disableTargetInfo.
|
||||
func() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.targetInfo == nil && !c.disableTargetInfo {
|
||||
targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource)
|
||||
if err != nil {
|
||||
// If the target info metric is invalid, disable sending it.
|
||||
c.disableTargetInfo = true
|
||||
otel.Handle(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.targetInfo = targetInfo
|
||||
}
|
||||
}()
|
||||
|
||||
if !c.disableTargetInfo {
|
||||
ch <- c.targetInfo
|
||||
}
|
||||
|
||||
for _, scopeMetrics := range metrics.ScopeMetrics {
|
||||
var keys, values [2]string
|
||||
|
||||
if !c.disableScopeInfo {
|
||||
scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
|
||||
if err == errScopeInvalid {
|
||||
// Do not report the same error multiple times.
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- scopeInfo
|
||||
|
||||
keys = scopeInfoKeys
|
||||
values = [2]string{scopeMetrics.Scope.Name, scopeMetrics.Scope.Version}
|
||||
}
|
||||
|
||||
for _, m := range scopeMetrics.Metrics {
|
||||
typ := c.metricType(m)
|
||||
if typ == nil {
|
||||
continue
|
||||
}
|
||||
name := c.getName(m, typ)
|
||||
|
||||
drop, help := c.validateMetrics(name, m.Description, typ)
|
||||
if drop {
|
||||
continue
|
||||
}
|
||||
|
||||
if help != "" {
|
||||
m.Description = help
|
||||
}
|
||||
|
||||
switch v := m.Data.(type) {
|
||||
case metricdata.Histogram[int64]:
|
||||
addHistogramMetric(ch, v, m, keys, values, name)
|
||||
case metricdata.Histogram[float64]:
|
||||
addHistogramMetric(ch, v, m, keys, values, name)
|
||||
case metricdata.Sum[int64]:
|
||||
addSumMetric(ch, v, m, keys, values, name)
|
||||
case metricdata.Sum[float64]:
|
||||
addSumMetric(ch, v, m, keys, values, name)
|
||||
case metricdata.Gauge[int64]:
|
||||
addGaugeMetric(ch, v, m, keys, values, name)
|
||||
case metricdata.Gauge[float64]:
|
||||
addGaugeMetric(ch, v, m, keys, values, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string) {
|
||||
// TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars
|
||||
for _, dp := range histogram.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes, ks, vs)
|
||||
|
||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||
buckets := make(map[float64]uint64, len(dp.Bounds))
|
||||
|
||||
cumulativeCount := uint64(0)
|
||||
for i, bound := range dp.Bounds {
|
||||
cumulativeCount += dp.BucketCounts[i]
|
||||
buckets[bound] = cumulativeCount
|
||||
}
|
||||
m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
ch <- m
|
||||
}
|
||||
}
|
||||
|
||||
func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string) {
|
||||
valueType := prometheus.CounterValue
|
||||
if !sum.IsMonotonic {
|
||||
valueType = prometheus.GaugeValue
|
||||
}
|
||||
|
||||
for _, dp := range sum.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes, ks, vs)
|
||||
|
||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||
m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
ch <- m
|
||||
}
|
||||
}
|
||||
|
||||
func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string) {
|
||||
for _, dp := range gauge.DataPoints {
|
||||
keys, values := getAttrs(dp.Attributes, ks, vs)
|
||||
|
||||
desc := prometheus.NewDesc(name, m.Description, keys, nil)
|
||||
m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
continue
|
||||
}
|
||||
ch <- m
|
||||
}
|
||||
}
|
||||
|
||||
// getAttrs parses the attribute.Set to two lists of matching Prometheus-style
|
||||
// keys and values. It sanitizes invalid characters and handles duplicate keys
|
||||
// (due to sanitization) by sorting and concatenating the values following the spec.
|
||||
func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) {
|
||||
keysMap := make(map[string][]string)
|
||||
itr := attrs.Iter()
|
||||
for itr.Next() {
|
||||
kv := itr.Attribute()
|
||||
key := strings.Map(sanitizeRune, string(kv.Key))
|
||||
if _, ok := keysMap[key]; !ok {
|
||||
keysMap[key] = []string{kv.Value.Emit()}
|
||||
} else {
|
||||
// if the sanitized key is a duplicate, append to the list of keys
|
||||
keysMap[key] = append(keysMap[key], kv.Value.Emit())
|
||||
}
|
||||
}
|
||||
|
||||
keys := make([]string, 0, attrs.Len())
|
||||
values := make([]string, 0, attrs.Len())
|
||||
for key, vals := range keysMap {
|
||||
keys = append(keys, key)
|
||||
sort.Slice(vals, func(i, j int) bool {
|
||||
return i < j
|
||||
})
|
||||
values = append(values, strings.Join(vals, ";"))
|
||||
}
|
||||
|
||||
if ks[0] != "" {
|
||||
keys = append(keys, ks[:]...)
|
||||
values = append(values, vs[:]...)
|
||||
}
|
||||
return keys, values
|
||||
}
|
||||
|
||||
func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
|
||||
keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{})
|
||||
desc := prometheus.NewDesc(name, description, keys, nil)
|
||||
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
|
||||
}
|
||||
|
||||
func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) {
|
||||
keys := scopeInfoKeys[:]
|
||||
desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil)
|
||||
return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), scope.Name, scope.Version)
|
||||
}
|
||||
|
||||
func sanitizeRune(r rune) rune {
|
||||
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' {
|
||||
return r
|
||||
}
|
||||
return '_'
|
||||
}
|
||||
|
||||
var unitSuffixes = map[string]string{
|
||||
// Time
|
||||
"d": "_days",
|
||||
"h": "_hours",
|
||||
"min": "_minutes",
|
||||
"s": "_seconds",
|
||||
"ms": "_milliseconds",
|
||||
"us": "_microseconds",
|
||||
"ns": "_nanoseconds",
|
||||
|
||||
// Bytes
|
||||
"By": "_bytes",
|
||||
"KiBy": "_kibibytes",
|
||||
"MiBy": "_mebibytes",
|
||||
"GiBy": "_gibibytes",
|
||||
"TiBy": "_tibibytes",
|
||||
"KBy": "_kilobytes",
|
||||
"MBy": "_megabytes",
|
||||
"GBy": "_gigabytes",
|
||||
"TBy": "_terabytes",
|
||||
|
||||
// SI
|
||||
"m": "_meters",
|
||||
"V": "_volts",
|
||||
"A": "_amperes",
|
||||
"J": "_joules",
|
||||
"W": "_watts",
|
||||
"g": "_grams",
|
||||
|
||||
// Misc
|
||||
"Cel": "_celsius",
|
||||
"Hz": "_hertz",
|
||||
"1": "_ratio",
|
||||
"%": "_percent",
|
||||
}
|
||||
|
||||
// getName returns the sanitized name, prefixed with the namespace and suffixed with unit.
|
||||
func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string {
|
||||
name := sanitizeName(m.Name)
|
||||
addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER
|
||||
if addCounterSuffix {
|
||||
// Remove the _total suffix here, as we will re-add the total suffix
|
||||
// later, and it needs to come after the unit suffix.
|
||||
name = strings.TrimSuffix(name, counterSuffix)
|
||||
}
|
||||
if c.namespace != "" {
|
||||
name = c.namespace + name
|
||||
}
|
||||
if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) {
|
||||
name += suffix
|
||||
}
|
||||
if addCounterSuffix {
|
||||
name += counterSuffix
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func sanitizeName(n string) string {
|
||||
// This algorithm is based on strings.Map from Go 1.19.
|
||||
const replacement = '_'
|
||||
|
||||
valid := func(i int, r rune) bool {
|
||||
// Taken from
|
||||
// https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric.go#L92-L102
|
||||
if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == ':' || (r >= '0' && r <= '9' && i > 0) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// This output buffer b is initialized on demand, the first time a
|
||||
// character needs to be replaced.
|
||||
var b strings.Builder
|
||||
for i, c := range n {
|
||||
if valid(i, c) {
|
||||
continue
|
||||
}
|
||||
|
||||
if i == 0 && c >= '0' && c <= '9' {
|
||||
// Prefix leading number with replacement character.
|
||||
b.Grow(len(n) + 1)
|
||||
_ = b.WriteByte(byte(replacement))
|
||||
break
|
||||
}
|
||||
b.Grow(len(n))
|
||||
_, _ = b.WriteString(n[:i])
|
||||
_ = b.WriteByte(byte(replacement))
|
||||
width := utf8.RuneLen(c)
|
||||
n = n[i+width:]
|
||||
break
|
||||
}
|
||||
|
||||
// Fast path for unchanged input.
|
||||
if b.Cap() == 0 { // b.Grow was not called above.
|
||||
return n
|
||||
}
|
||||
|
||||
for _, c := range n {
|
||||
// Due to inlining, it is more performant to invoke WriteByte rather then
|
||||
// WriteRune.
|
||||
if valid(1, c) { // We are guaranteed to not be at the start.
|
||||
_ = b.WriteByte(byte(c))
|
||||
} else {
|
||||
_ = b.WriteByte(byte(replacement))
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
|
||||
switch v := m.Data.(type) {
|
||||
case metricdata.Histogram[int64], metricdata.Histogram[float64]:
|
||||
return dto.MetricType_HISTOGRAM.Enum()
|
||||
case metricdata.Sum[float64]:
|
||||
if v.IsMonotonic {
|
||||
return dto.MetricType_COUNTER.Enum()
|
||||
}
|
||||
return dto.MetricType_GAUGE.Enum()
|
||||
case metricdata.Sum[int64]:
|
||||
if v.IsMonotonic {
|
||||
return dto.MetricType_COUNTER.Enum()
|
||||
}
|
||||
return dto.MetricType_GAUGE.Enum()
|
||||
case metricdata.Gauge[int64], metricdata.Gauge[float64]:
|
||||
return dto.MetricType_GAUGE.Enum()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
scopeInfo, ok := c.scopeInfos[scope]
|
||||
if ok {
|
||||
return scopeInfo, nil
|
||||
}
|
||||
|
||||
if _, ok := c.scopeInfosInvalid[scope]; ok {
|
||||
return nil, errScopeInvalid
|
||||
}
|
||||
|
||||
scopeInfo, err := createScopeInfoMetric(scope)
|
||||
if err != nil {
|
||||
c.scopeInfosInvalid[scope] = struct{}{}
|
||||
return nil, fmt.Errorf("cannot create scope info metric: %w", err)
|
||||
}
|
||||
|
||||
c.scopeInfos[scope] = scopeInfo
|
||||
|
||||
return scopeInfo, nil
|
||||
}
|
||||
|
||||
func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
emf, exist := c.metricFamilies[name]
|
||||
|
||||
if !exist {
|
||||
c.metricFamilies[name] = &dto.MetricFamily{
|
||||
Name: proto.String(name),
|
||||
Help: proto.String(description),
|
||||
Type: metricType,
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
if emf.GetType() != *metricType {
|
||||
global.Error(
|
||||
errors.New("instrument type conflict"),
|
||||
"Using existing type definition.",
|
||||
"instrument", name,
|
||||
"existing", emf.GetType(),
|
||||
"dropped", *metricType,
|
||||
)
|
||||
return true, ""
|
||||
}
|
||||
if emf.GetHelp() != description {
|
||||
global.Info(
|
||||
"Instrument description conflict, using existing",
|
||||
"instrument", name,
|
||||
"existing", emf.GetHelp(),
|
||||
"dropped", description,
|
||||
)
|
||||
return false, emf.GetHelp()
|
||||
}
|
||||
|
||||
return false, ""
|
||||
}
|
264
vendor/go.opentelemetry.io/otel/metric/noop/noop.go
generated
vendored
Normal file
264
vendor/go.opentelemetry.io/otel/metric/noop/noop.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package noop provides an implementation of the OpenTelemetry metric API that
|
||||
// produces no telemetry and minimizes used computation resources.
|
||||
//
|
||||
// Using this package to implement the OpenTelemetry metric API will
|
||||
// effectively disable OpenTelemetry.
|
||||
//
|
||||
// This implementation can be embedded in other implementations of the
|
||||
// OpenTelemetry metric API. Doing so will mean the implementation defaults to
|
||||
// no operation for methods it does not implement.
|
||||
package noop // import "go.opentelemetry.io/otel/metric/noop"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
)
|
||||
|
||||
var (
|
||||
// Compile-time check this implements the OpenTelemetry API.
|
||||
|
||||
_ metric.MeterProvider = MeterProvider{}
|
||||
_ metric.Meter = Meter{}
|
||||
_ metric.Observer = Observer{}
|
||||
_ metric.Registration = Registration{}
|
||||
_ metric.Int64Counter = Int64Counter{}
|
||||
_ metric.Float64Counter = Float64Counter{}
|
||||
_ metric.Int64UpDownCounter = Int64UpDownCounter{}
|
||||
_ metric.Float64UpDownCounter = Float64UpDownCounter{}
|
||||
_ metric.Int64Histogram = Int64Histogram{}
|
||||
_ metric.Float64Histogram = Float64Histogram{}
|
||||
_ metric.Int64ObservableCounter = Int64ObservableCounter{}
|
||||
_ metric.Float64ObservableCounter = Float64ObservableCounter{}
|
||||
_ metric.Int64ObservableGauge = Int64ObservableGauge{}
|
||||
_ metric.Float64ObservableGauge = Float64ObservableGauge{}
|
||||
_ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
|
||||
_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
|
||||
_ metric.Int64Observer = Int64Observer{}
|
||||
_ metric.Float64Observer = Float64Observer{}
|
||||
)
|
||||
|
||||
// MeterProvider is an OpenTelemetry No-Op MeterProvider.
|
||||
type MeterProvider struct{ embedded.MeterProvider }
|
||||
|
||||
// NewMeterProvider returns a MeterProvider that does not record any telemetry.
|
||||
func NewMeterProvider() MeterProvider {
|
||||
return MeterProvider{}
|
||||
}
|
||||
|
||||
// Meter returns an OpenTelemetry Meter that does not record any telemetry.
|
||||
func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
|
||||
return Meter{}
|
||||
}
|
||||
|
||||
// Meter is an OpenTelemetry No-Op Meter.
|
||||
type Meter struct{ embedded.Meter }
|
||||
|
||||
// Int64Counter returns a Counter used to record int64 measurements that
|
||||
// produces no telemetry.
|
||||
func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
||||
return Int64Counter{}, nil
|
||||
}
|
||||
|
||||
// Int64UpDownCounter returns an UpDownCounter used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
return Int64UpDownCounter{}, nil
|
||||
}
|
||||
|
||||
// Int64Histogram returns a Histogram used to record int64 measurements that
|
||||
// produces no telemetry.
|
||||
func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
||||
return Int64Histogram{}, nil
|
||||
}
|
||||
|
||||
// Int64ObservableCounter returns an ObservableCounter used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
return Int64ObservableCounter{}, nil
|
||||
}
|
||||
|
||||
// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
|
||||
// record int64 measurements that produces no telemetry.
|
||||
func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
return Int64ObservableUpDownCounter{}, nil
|
||||
}
|
||||
|
||||
// Int64ObservableGauge returns an ObservableGauge used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
return Int64ObservableGauge{}, nil
|
||||
}
|
||||
|
||||
// Float64Counter returns a Counter used to record int64 measurements that
|
||||
// produces no telemetry.
|
||||
func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
||||
return Float64Counter{}, nil
|
||||
}
|
||||
|
||||
// Float64UpDownCounter returns an UpDownCounter used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
return Float64UpDownCounter{}, nil
|
||||
}
|
||||
|
||||
// Float64Histogram returns a Histogram used to record int64 measurements that
|
||||
// produces no telemetry.
|
||||
func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
return Float64Histogram{}, nil
|
||||
}
|
||||
|
||||
// Float64ObservableCounter returns an ObservableCounter used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
return Float64ObservableCounter{}, nil
|
||||
}
|
||||
|
||||
// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
|
||||
// record int64 measurements that produces no telemetry.
|
||||
func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
return Float64ObservableUpDownCounter{}, nil
|
||||
}
|
||||
|
||||
// Float64ObservableGauge returns an ObservableGauge used to record int64
|
||||
// measurements that produces no telemetry.
|
||||
func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
return Float64ObservableGauge{}, nil
|
||||
}
|
||||
|
||||
// RegisterCallback performs no operation.
|
||||
func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
|
||||
return Registration{}, nil
|
||||
}
|
||||
|
||||
// Observer acts as a recorder of measurements for multiple instruments in a
|
||||
// Callback, it performing no operation.
|
||||
type Observer struct{ embedded.Observer }
|
||||
|
||||
// ObserveFloat64 performs no operation.
|
||||
func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
|
||||
}
|
||||
|
||||
// ObserveInt64 performs no operation.
|
||||
func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
|
||||
}
|
||||
|
||||
// Registration is the registration of a Callback with a No-Op Meter.
|
||||
type Registration struct{ embedded.Registration }
|
||||
|
||||
// Unregister unregisters the Callback the Registration represents with the
|
||||
// No-Op Meter. This will always return nil because the No-Op Meter performs no
|
||||
// operation, including hold any record of registrations.
|
||||
func (Registration) Unregister() error { return nil }
|
||||
|
||||
// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
|
||||
// It produces no telemetry.
|
||||
type Int64Counter struct{ embedded.Int64Counter }
|
||||
|
||||
// Add performs no operation.
|
||||
func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
|
||||
|
||||
// Float64Counter is an OpenTelemetry Counter used to record float64
|
||||
// measurements. It produces no telemetry.
|
||||
type Float64Counter struct{ embedded.Float64Counter }
|
||||
|
||||
// Add performs no operation.
|
||||
func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
|
||||
|
||||
// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
|
||||
// measurements. It produces no telemetry.
|
||||
type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
|
||||
|
||||
// Add performs no operation.
|
||||
func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
|
||||
|
||||
// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
|
||||
// float64 measurements. It produces no telemetry.
|
||||
type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
|
||||
|
||||
// Add performs no operation.
|
||||
func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
|
||||
|
||||
// Int64Histogram is an OpenTelemetry Histogram used to record int64
|
||||
// measurements. It produces no telemetry.
|
||||
type Int64Histogram struct{ embedded.Int64Histogram }
|
||||
|
||||
// Record performs no operation.
|
||||
func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
|
||||
|
||||
// Float64Histogram is an OpenTelemetry Histogram used to record float64
|
||||
// measurements. It produces no telemetry.
|
||||
type Float64Histogram struct{ embedded.Float64Histogram }
|
||||
|
||||
// Record performs no operation.
|
||||
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
|
||||
|
||||
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
|
||||
// int64 measurements. It produces no telemetry.
|
||||
type Int64ObservableCounter struct {
|
||||
metric.Int64Observable
|
||||
embedded.Int64ObservableCounter
|
||||
}
|
||||
|
||||
// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
|
||||
// float64 measurements. It produces no telemetry.
|
||||
type Float64ObservableCounter struct {
|
||||
metric.Float64Observable
|
||||
embedded.Float64ObservableCounter
|
||||
}
|
||||
|
||||
// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
|
||||
// int64 measurements. It produces no telemetry.
|
||||
type Int64ObservableGauge struct {
|
||||
metric.Int64Observable
|
||||
embedded.Int64ObservableGauge
|
||||
}
|
||||
|
||||
// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
|
||||
// float64 measurements. It produces no telemetry.
|
||||
type Float64ObservableGauge struct {
|
||||
metric.Float64Observable
|
||||
embedded.Float64ObservableGauge
|
||||
}
|
||||
|
||||
// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
|
||||
// used to record int64 measurements. It produces no telemetry.
|
||||
type Int64ObservableUpDownCounter struct {
|
||||
metric.Int64Observable
|
||||
embedded.Int64ObservableUpDownCounter
|
||||
}
|
||||
|
||||
// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
|
||||
// used to record float64 measurements. It produces no telemetry.
|
||||
type Float64ObservableUpDownCounter struct {
|
||||
metric.Float64Observable
|
||||
embedded.Float64ObservableUpDownCounter
|
||||
}
|
||||
|
||||
// Int64Observer is a recorder of int64 measurements that performs no operation.
|
||||
type Int64Observer struct{ embedded.Int64Observer }
|
||||
|
||||
// Observe performs no operation.
|
||||
func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
|
||||
|
||||
// Float64Observer is a recorder of float64 measurements that performs no
|
||||
// operation.
|
||||
type Float64Observer struct{ embedded.Float64Observer }
|
||||
|
||||
// Observe performs no operation.
|
||||
func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
|
201
vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
generated
vendored
Normal file
201
vendor/go.opentelemetry.io/otel/sdk/metric/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
201
vendor/go.opentelemetry.io/otel/sdk/metric/aggregation.go
generated
vendored
Normal file
@@ -0,0 +1,201 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"errors"
	"fmt"
)

// errAgg is wrapped by misconfigured aggregations.
var errAgg = errors.New("aggregation")

// Aggregation is the aggregation used to summarize recorded measurements.
type Aggregation interface {
	// copy returns a deep copy of the Aggregation.
	copy() Aggregation

	// err returns an error for any misconfigured Aggregation.
	err() error
}

// AggregationDrop is an Aggregation that drops all recorded data.
type AggregationDrop struct{} // AggregationDrop has no parameters.

var _ Aggregation = AggregationDrop{}

// copy returns a deep copy of d.
func (d AggregationDrop) copy() Aggregation { return d }

// err returns an error for any misconfiguration. A drop aggregation has no
// parameters and cannot be misconfigured, therefore this always returns nil.
func (AggregationDrop) err() error { return nil }

// AggregationDefault is an Aggregation that uses the default instrument kind selection
// mapping to select another Aggregation. A metric reader can be configured to
// make an aggregation selection based on instrument kind that differs from
// the default. This Aggregation ensures the default is used.
//
// See the [DefaultAggregationSelector] for information about the default
// instrument kind selection mapping.
type AggregationDefault struct{} // AggregationDefault has no parameters.

var _ Aggregation = AggregationDefault{}

// copy returns a deep copy of d.
func (d AggregationDefault) copy() Aggregation { return d }

// err returns an error for any misconfiguration. A default aggregation has no
// parameters and cannot be misconfigured, therefore this always returns nil.
func (AggregationDefault) err() error { return nil }

// AggregationSum is an Aggregation that summarizes a set of measurements as their
// arithmetic sum.
type AggregationSum struct{} // AggregationSum has no parameters.

var _ Aggregation = AggregationSum{}

// copy returns a deep copy of s.
func (s AggregationSum) copy() Aggregation { return s }

// err returns an error for any misconfiguration. A sum aggregation has no
// parameters and cannot be misconfigured, therefore this always returns nil.
func (AggregationSum) err() error { return nil }

// AggregationLastValue is an Aggregation that summarizes a set of measurements as the
// last one made.
type AggregationLastValue struct{} // AggregationLastValue has no parameters.

var _ Aggregation = AggregationLastValue{}

// copy returns a deep copy of l.
func (l AggregationLastValue) copy() Aggregation { return l }

// err returns an error for any misconfiguration. A last-value aggregation has
// no parameters and cannot be misconfigured, therefore this always returns
// nil.
func (AggregationLastValue) err() error { return nil }

// AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of
// measurements as a histogram with explicitly defined buckets.
type AggregationExplicitBucketHistogram struct {
	// Boundaries are the increasing bucket boundary values. Boundary values
	// define bucket upper bounds. Buckets are exclusive of their lower
	// boundary and inclusive of their upper bound (except at positive
	// infinity). A measurement is defined to fall into the greatest-numbered
	// bucket with a boundary that is greater than or equal to the
	// measurement. As an example, boundaries defined as:
	//
	// []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
	//
	// Will define these buckets:
	//
	// (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
	// (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
	// (500.0, 1000.0], (1000.0, +∞)
	Boundaries []float64
	// NoMinMax indicates whether to not record the min and max of the
	// distribution. By default, these extrema are recorded.
	//
	// Recording these extrema for cumulative data is expected to have little
	// value, they will represent the entire life of the instrument instead of
	// just the current collection cycle. It is recommended to set this to true
	// for that type of data to avoid computing the low-value extrema.
	NoMinMax bool
}

var _ Aggregation = AggregationExplicitBucketHistogram{}

// errHist is returned by misconfigured ExplicitBucketHistograms.
var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)

// err returns an error for any misconfiguration.
func (h AggregationExplicitBucketHistogram) err() error {
	if len(h.Boundaries) <= 1 {
		return nil
	}

	// Check boundaries are monotonic.
	i := h.Boundaries[0]
	for _, j := range h.Boundaries[1:] {
		if i >= j {
			return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
		}
		i = j
	}

	return nil
}

// copy returns a deep copy of h.
func (h AggregationExplicitBucketHistogram) copy() Aggregation {
	b := make([]float64, len(h.Boundaries))
	copy(b, h.Boundaries)
	return AggregationExplicitBucketHistogram{
		Boundaries: b,
		NoMinMax:   h.NoMinMax,
	}
}

// AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of
// measurements as a histogram with bucket widths that grow exponentially.
type AggregationBase2ExponentialHistogram struct {
	// MaxSize is the maximum number of buckets to use for the histogram.
	MaxSize int32
	// MaxScale is the maximum resolution scale to use for the histogram.
	//
	// MaxScale has a maximum value of 20. Using a value of 20 means the
	// maximum number of buckets that can fit within the range of a
	// signed 32-bit integer index could be used.
	//
	// MaxScale has a minimum value of -10. Using a value of -10 means only
	// two buckets will be used.
	MaxScale int32

	// NoMinMax indicates whether to not record the min and max of the
	// distribution. By default, these extrema are recorded.
	//
	// Recording these extrema for cumulative data is expected to have little
	// value, they will represent the entire life of the instrument instead of
	// just the current collection cycle. It is recommended to set this to true
	// for that type of data to avoid computing the low-value extrema.
	NoMinMax bool
}

var _ Aggregation = AggregationBase2ExponentialHistogram{}

// copy returns a deep copy of the Aggregation.
func (e AggregationBase2ExponentialHistogram) copy() Aggregation {
	return e
}

const (
	expoMaxScale = 20
	expoMinScale = -10
)

// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)

// err returns an error for any misconfigured Aggregation.
func (e AggregationBase2ExponentialHistogram) err() error {
	if e.MaxScale > expoMaxScale {
		return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
	}
	if e.MaxSize <= 0 {
		return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
	}
	return nil
}
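The histogram aggregations above are normally selected per instrument through a View rather than constructed directly. The following is a minimal sketch of that wiring, assuming only the exported SDK API of this package; the instrument name and boundary values are illustrative and not taken from this diff.

// Illustrative sketch (not part of this diff): selecting the explicit-bucket
// histogram aggregation for one instrument through a View. The instrument
// name and boundaries are hypothetical.
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func newProvider(reader sdkmetric.Reader) *sdkmetric.MeterProvider {
	histView := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.duration"}, // match by name
		sdkmetric.Stream{
			Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
				// Boundaries must increase monotonically; err() above reports
				// anything else as a misconfiguration.
				Boundaries: []float64{0, 5, 10, 25, 50, 100, 250, 500, 1000},
			},
		},
	)
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(histView),
	)
}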
54
vendor/go.opentelemetry.io/otel/sdk/metric/cache.go
generated
vendored
Normal file
@@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"sync"
)

// cache is a locking storage used to quickly return already computed values.
//
// The zero value of a cache is empty and ready to use.
//
// A cache must not be copied after first use.
//
// All methods of a cache are safe to call concurrently.
type cache[K comparable, V any] struct {
	sync.Mutex
	data map[K]V
}

// Lookup returns the value stored in the cache with the associated key if it
// exists. Otherwise, f is called and its returned value is set in the cache
// for key and returned.
//
// Lookup is safe to call concurrently. It will hold the cache lock, so f
// should not block excessively.
func (c *cache[K, V]) Lookup(key K, f func() V) V {
	c.Lock()
	defer c.Unlock()

	if c.data == nil {
		val := f()
		c.data = map[K]V{key: val}
		return val
	}
	if v, ok := c.data[key]; ok {
		return v
	}
	val := f()
	c.data[key] = val
	return val
}
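Since cache is unexported, the memoization pattern its Lookup method implements can only be demonstrated standalone. The sketch below mirrors that lock-then-compute behavior with hypothetical names.

// Illustrative sketch (not part of this diff): the same lock-guarded
// memoization pattern cache.Lookup implements, shown as a standalone type
// because cache itself is unexported.
package main

import (
	"fmt"
	"sync"
)

type memo[K comparable, V any] struct {
	sync.Mutex
	data map[K]V
}

// lookup returns the cached value for key, computing and storing it with f
// on first use. f runs while the lock is held, mirroring cache.Lookup.
func (m *memo[K, V]) lookup(key K, f func() V) V {
	m.Lock()
	defer m.Unlock()
	if m.data == nil {
		m.data = make(map[K]V)
	}
	if v, ok := m.data[key]; ok {
		return v
	}
	v := f()
	m.data[key] = v
	return v
}

func main() {
	var c memo[string, int]
	fmt.Println(c.lookup("answer", func() int { return 42 })) // computed: 42
	fmt.Println(c.lookup("answer", func() int { return 0 }))  // cached: 42
}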
148
vendor/go.opentelemetry.io/otel/sdk/metric/config.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"fmt"
	"sync"

	"go.opentelemetry.io/otel/sdk/resource"
)

// config contains configuration options for a MeterProvider.
type config struct {
	res     *resource.Resource
	readers []Reader
	views   []View
}

// readerSignals returns force-flush and shutdown functions for a
// MeterProvider to call from its corresponding methods. All Readers c
// contains will have their force-flush and shutdown methods unified into
// the returned single functions.
func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
	var fFuncs, sFuncs []func(context.Context) error
	for _, r := range c.readers {
		sFuncs = append(sFuncs, r.Shutdown)
		if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok {
			fFuncs = append(fFuncs, f.ForceFlush)
		}
	}

	return unify(fFuncs), unifyShutdown(sFuncs)
}

// unify unifies calling all of funcs into a single function call. All errors
// returned from calls to funcs will be unified into a single error return
// value.
func unify(funcs []func(context.Context) error) func(context.Context) error {
	return func(ctx context.Context) error {
		var errs []error
		for _, f := range funcs {
			if err := f(ctx); err != nil {
				errs = append(errs, err)
			}
		}
		return unifyErrors(errs)
	}
}

// unifyErrors combines multiple errors into a single error.
func unifyErrors(errs []error) error {
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	default:
		return fmt.Errorf("%v", errs)
	}
}

// unifyShutdown unifies calling all of funcs once for a shutdown. If called
// more than once, an ErrReaderShutdown error is returned.
func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
	f := unify(funcs)
	var once sync.Once
	return func(ctx context.Context) error {
		err := ErrReaderShutdown
		once.Do(func() { err = f(ctx) })
		return err
	}
}

// newConfig returns a config configured with options.
func newConfig(options []Option) config {
	conf := config{res: resource.Default()}
	for _, o := range options {
		conf = o.apply(conf)
	}
	return conf
}

// Option applies a configuration option value to a MeterProvider.
type Option interface {
	apply(config) config
}

// optionFunc applies a set of options to a config.
type optionFunc func(config) config

// apply returns a config with option(s) applied.
func (o optionFunc) apply(conf config) config {
	return o(conf)
}

// WithResource associates a Resource with a MeterProvider. This Resource
// represents the entity producing telemetry and is associated with all Meters
// the MeterProvider will create.
//
// By default, if this Option is not used, the default Resource from the
// go.opentelemetry.io/otel/sdk/resource package will be used.
func WithResource(res *resource.Resource) Option {
	return optionFunc(func(conf config) config {
		conf.res = res
		return conf
	})
}

// WithReader associates Reader r with a MeterProvider.
//
// By default, if this option is not used, the MeterProvider will perform no
// operations; no data will be exported without a Reader.
func WithReader(r Reader) Option {
	return optionFunc(func(cfg config) config {
		if r == nil {
			return cfg
		}
		cfg.readers = append(cfg.readers, r)
		return cfg
	})
}

// WithView associates views with a MeterProvider.
//
// Views are appended to existing ones in a MeterProvider if this option is
// used multiple times.
//
// By default, if this option is not used, the MeterProvider will use the
// default view.
func WithView(views ...View) Option {
	return optionFunc(func(cfg config) config {
		cfg.views = append(cfg.views, views...)
		return cfg
	})
}
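A sketch of how these functional options compose: newConfig applies them in the order they are passed, so a later WithResource replaces an earlier one while WithReader and WithView accumulate. The resource attributes and reader parameters below are placeholders, not values from this diff.

// Illustrative sketch (not part of this diff): composing the options above.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/resource"
)

func newProvider(r1, r2 sdkmetric.Reader) *sdkmetric.MeterProvider {
	res := resource.NewWithAttributes("", attribute.String("service.name", "demo"))
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithResource(res), // replaces the default resource
		sdkmetric.WithReader(r1),    // both readers are kept; their ForceFlush
		sdkmetric.WithReader(r2),    // and Shutdown calls are unified
	)
}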
47
vendor/go.opentelemetry.io/otel/sdk/metric/doc.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metric provides an implementation of the OpenTelemetry metrics SDK.
//
// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
// about the concept of OpenTelemetry metrics and
// https://opentelemetry.io/docs/concepts/components/ for more information
// about OpenTelemetry SDKs.
//
// The entry point for the metric package is the MeterProvider. It is the
// object that all API calls use to create Meters, instruments, and ultimately
// make metric measurements. Also, it is an object that should be used to
// control the life-cycle (start, flush, and shutdown) of the SDK.
//
// A MeterProvider needs to be configured to export the measured data; this is
// done by configuring it with a Reader implementation (using the WithReader
// MeterProviderOption). Readers take two forms: ones that push to an endpoint
// (NewPeriodicReader), and ones that an endpoint pulls from. See
// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
// or with these Readers.
//
// Each Reader, when registered with the MeterProvider, can be augmented with a
// View. Views allow users that run OpenTelemetry instrumented code to modify
// the generated data of that instrumentation.
//
// The data generated by a MeterProvider needs to include information about its
// origin. A MeterProvider needs to be configured with a Resource, using the
// WithResource MeterProviderOption, to include this information. This Resource
// should be used to describe the unique runtime environment instrumented code
// is being run on. That way when multiple instances of the code are collected
// at a single endpoint their origin is decipherable.
//
// See [go.opentelemetry.io/otel/metric] for more information about
// the metric API.
package metric // import "go.opentelemetry.io/otel/sdk/metric"
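A minimal end-to-end sketch of the life-cycle the package documentation describes. It assumes the stdoutmetric exporter module (go.opentelemetry.io/otel/exporters/stdout/stdoutmetric) is available; the meter and instrument names are illustrative.

// Illustrative sketch (not part of this diff): configure, measure, shut down.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	exp, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}

	// A periodic Reader pushes collected data to the exporter.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }() // flush and stop the SDK

	meter := provider.Meter("example/doc")
	counter, err := meter.Int64Counter("requests.total")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(ctx, 1)
}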
50
vendor/go.opentelemetry.io/otel/sdk/metric/env.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"os"
	"strconv"
	"time"

	"go.opentelemetry.io/otel/internal/global"
)

// Environment variable names.
const (
	// The time interval (in milliseconds) between the start of two export attempts.
	envInterval = "OTEL_METRIC_EXPORT_INTERVAL"
	// Maximum allowed time (in milliseconds) to export data.
	envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT"
)

// envDuration returns an environment variable's value as a duration in
// milliseconds if it exists, or the defaultValue if the environment variable
// is not defined or the value is not valid.
func envDuration(key string, defaultValue time.Duration) time.Duration {
	v := os.Getenv(key)
	if v == "" {
		return defaultValue
	}
	d, err := strconv.Atoi(v)
	if err != nil {
		global.Error(err, "parse duration", "environment variable", key, "value", v)
		return defaultValue
	}
	if d <= 0 {
		global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v)
		return defaultValue
	}
	return time.Duration(d) * time.Millisecond
}
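These variables are read through envDuration when the periodic reader is configured, so export timing can be tuned without code changes. A small sketch follows; the programmatic Setenv is only there to keep the example self-contained, and in practice the variable would be set in the environment.

// Illustrative sketch (not part of this diff): tuning the export interval via
// the environment variable declared above. Invalid or non-positive values
// fall back to the default, as envDuration shows.
package main

import (
	"os"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func newReader() (sdkmetric.Reader, error) {
	// Export every 10s; a value like "10x" or "-5" would be ignored.
	_ = os.Setenv("OTEL_METRIC_EXPORT_INTERVAL", "10000")

	exp, err := stdoutmetric.New()
	if err != nil {
		return nil, err
	}
	return sdkmetric.NewPeriodicReader(exp), nil
}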
88
vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// ErrExporterShutdown is returned if Export or Shutdown are called after an
// Exporter has been Shutdown.
var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")

// Exporter handles the delivery of metric data to external receivers. This is
// the final component in the metric push pipeline.
type Exporter interface {
	// Temporality returns the Temporality to use for an instrument kind.
	//
	// This method needs to be concurrent safe with itself and all the other
	// Exporter methods.
	Temporality(InstrumentKind) metricdata.Temporality
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Aggregation returns the Aggregation to use for an instrument kind.
	//
	// This method needs to be concurrent safe with itself and all the other
	// Exporter methods.
	Aggregation(InstrumentKind) Aggregation
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Export serializes and transmits metric data to a receiver.
	//
	// This is called synchronously, there is no concurrency safety
	// requirement. Because of this, it is critical that all timeouts and
	// cancellations of the passed context be honored.
	//
	// All retry logic must be contained in this function. The SDK does not
	// implement any retry logic. All errors returned by this function are
	// considered unrecoverable and will be reported to a configured error
	// Handler.
	//
	// The passed ResourceMetrics may be reused when the call completes. If an
	// exporter needs to hold this data after it returns, it needs to make a
	// copy.
	Export(context.Context, *metricdata.ResourceMetrics) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// ForceFlush flushes any metric data held by an exporter.
	//
	// The deadline or cancellation of the passed context must be honored. An
	// appropriate error should be returned in these situations.
	//
	// This method needs to be concurrent safe.
	ForceFlush(context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.

	// Shutdown flushes all metric data held by an exporter and releases any
	// held computational resources.
	//
	// The deadline or cancellation of the passed context must be honored. An
	// appropriate error should be returned in these situations.
	//
	// After Shutdown is called, calls to Export will perform no operation and
	// instead will return an error indicating the shutdown state.
	//
	// This method needs to be concurrent safe.
	Shutdown(context.Context) error
	// DO NOT CHANGE: any modification will not be backwards compatible and
	// must never be done outside of a new major release.
}
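A minimal sketch of an Exporter implementation against the interface above. It assumes the package's DefaultTemporalitySelector and DefaultAggregationSelector helpers, which are defined elsewhere in the SDK rather than in this diff hunk, and it only counts exports instead of transmitting anything.

// Illustrative sketch (not part of this diff): a counting Exporter.
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

type countingExporter struct {
	exports atomic.Int64
}

var _ sdkmetric.Exporter = (*countingExporter)(nil)

func (e *countingExporter) Temporality(k sdkmetric.InstrumentKind) metricdata.Temporality {
	return sdkmetric.DefaultTemporalitySelector(k)
}

func (e *countingExporter) Aggregation(k sdkmetric.InstrumentKind) sdkmetric.Aggregation {
	return sdkmetric.DefaultAggregationSelector(k)
}

func (e *countingExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
	// Honor cancellation, as the interface contract requires.
	if err := ctx.Err(); err != nil {
		return err
	}
	e.exports.Add(1)
	fmt.Printf("export #%d: %d scope(s)\n", e.exports.Load(), len(rm.ScopeMetrics))
	return nil
}

func (e *countingExporter) ForceFlush(context.Context) error { return nil }

func (e *countingExporter) Shutdown(context.Context) error { return nil }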
340
vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go
generated
vendored
Normal file
@@ -0,0 +1,340 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind

package metric // import "go.opentelemetry.io/otel/sdk/metric"

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
)

var (
	zeroInstrumentKind InstrumentKind
	zeroScope          instrumentation.Scope
)

// InstrumentKind is the identifier of a group of instruments that all
// perform the same function.
type InstrumentKind uint8

const (
	// instrumentKindUndefined is an undefined instrument kind, it should not
	// be used by any initialized type.
	instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused
	// InstrumentKindCounter identifies a group of instruments that record
	// increasing values synchronously with the code path they are measuring.
	InstrumentKindCounter
	// InstrumentKindUpDownCounter identifies a group of instruments that
	// record increasing and decreasing values synchronously with the code path
	// they are measuring.
	InstrumentKindUpDownCounter
	// InstrumentKindHistogram identifies a group of instruments that record a
	// distribution of values synchronously with the code path they are
	// measuring.
	InstrumentKindHistogram
	// InstrumentKindObservableCounter identifies a group of instruments that
	// record increasing values in an asynchronous callback.
	InstrumentKindObservableCounter
	// InstrumentKindObservableUpDownCounter identifies a group of instruments
	// that record increasing and decreasing values in an asynchronous
	// callback.
	InstrumentKindObservableUpDownCounter
	// InstrumentKindObservableGauge identifies a group of instruments that
	// record current values in an asynchronous callback.
	InstrumentKindObservableGauge
)

type nonComparable [0]func() // nolint: unused // This is indeed used.

// Instrument describes properties an instrument is created with.
type Instrument struct {
	// Name is the human-readable identifier of the instrument.
	Name string
	// Description describes the purpose of the instrument.
	Description string
	// Kind defines the functional group of the instrument.
	Kind InstrumentKind
	// Unit is the unit of measurement recorded by the instrument.
	Unit string
	// Scope identifies the instrumentation that created the instrument.
	Scope instrumentation.Scope

	// Ensure forward compatibility if non-comparable fields need to be added.
	nonComparable // nolint: unused
}

// empty returns if all fields of i are their zero-value.
func (i Instrument) empty() bool {
	return i.Name == "" &&
		i.Description == "" &&
		i.Kind == zeroInstrumentKind &&
		i.Unit == "" &&
		i.Scope == zeroScope
}

// matches returns whether all the non-zero-value fields of i match the
// corresponding fields of other. If i is empty it will match all other, and
// true will always be returned.
func (i Instrument) matches(other Instrument) bool {
	return i.matchesName(other) &&
		i.matchesDescription(other) &&
		i.matchesKind(other) &&
		i.matchesUnit(other) &&
		i.matchesScope(other)
}

// matchesName returns true if the Name of i is "" or it equals the Name of
// other, otherwise false.
func (i Instrument) matchesName(other Instrument) bool {
	return i.Name == "" || i.Name == other.Name
}

// matchesDescription returns true if the Description of i is "" or it equals
// the Description of other, otherwise false.
func (i Instrument) matchesDescription(other Instrument) bool {
	return i.Description == "" || i.Description == other.Description
}

// matchesKind returns true if the Kind of i is its zero-value or it equals the
// Kind of other, otherwise false.
func (i Instrument) matchesKind(other Instrument) bool {
	return i.Kind == zeroInstrumentKind || i.Kind == other.Kind
}

// matchesUnit returns true if the Unit of i is its zero-value or it equals the
// Unit of other, otherwise false.
func (i Instrument) matchesUnit(other Instrument) bool {
	return i.Unit == "" || i.Unit == other.Unit
}

// matchesScope returns true if the Scope of i is its zero-value or it equals
// the Scope of other, otherwise false.
func (i Instrument) matchesScope(other Instrument) bool {
	return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) &&
		(i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) &&
		(i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL)
}

// Stream describes the stream of data an instrument produces.
type Stream struct {
	// Name is the human-readable identifier of the stream.
	Name string
	// Description describes the purpose of the data.
	Description string
	// Unit is the unit of measurement recorded.
	Unit string
	// Aggregation the stream uses for an instrument.
	Aggregation Aggregation
	// AttributeFilter is an attribute Filter applied to the attributes
	// recorded for an instrument's measurement. If the filter returns false
	// the attribute will not be recorded, otherwise, if it returns true, it
	// will record the attribute.
	//
	// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
	// provide an allow-list of attribute keys here.
	AttributeFilter attribute.Filter
}

// instID are the identifying properties of an instrument.
type instID struct {
	// Name is the name of the stream.
	Name string
	// Description is the description of the stream.
	Description string
	// Kind defines the functional group of the instrument.
	Kind InstrumentKind
	// Unit is the unit of the stream.
	Unit string
	// Number is the number type of the stream.
	Number string
}

// Returns a normalized copy of the instID i.
//
// Instrument names are considered case-insensitive. Standardize the instrument
// name to always be lowercase for the returned instID so it can be compared
// without the name casing affecting the comparison.
func (i instID) normalize() instID {
	i.Name = strings.ToLower(i.Name)
	return i
}

type int64Inst struct {
	measures []aggregate.Measure[int64]

	embedded.Int64Counter
	embedded.Int64UpDownCounter
	embedded.Int64Histogram
}

var _ metric.Int64Counter = (*int64Inst)(nil)
var _ metric.Int64UpDownCounter = (*int64Inst)(nil)
var _ metric.Int64Histogram = (*int64Inst)(nil)

func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
	c := metric.NewAddConfig(opts)
	i.aggregate(ctx, val, c.Attributes())
}

func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) {
	c := metric.NewRecordConfig(opts)
	i.aggregate(ctx, val, c.Attributes())
}

func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method.
	if err := ctx.Err(); err != nil {
		return
	}
	for _, in := range i.measures {
		in(ctx, val, s)
	}
}

type float64Inst struct {
	measures []aggregate.Measure[float64]

	embedded.Float64Counter
	embedded.Float64UpDownCounter
	embedded.Float64Histogram
}

var _ metric.Float64Counter = (*float64Inst)(nil)
var _ metric.Float64UpDownCounter = (*float64Inst)(nil)
var _ metric.Float64Histogram = (*float64Inst)(nil)

func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
	c := metric.NewAddConfig(opts)
	i.aggregate(ctx, val, c.Attributes())
}

func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) {
	c := metric.NewRecordConfig(opts)
	i.aggregate(ctx, val, c.Attributes())
}

func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) {
	if err := ctx.Err(); err != nil {
		return
	}
	for _, in := range i.measures {
		in(ctx, val, s)
	}
}

// observablID is a comparable unique identifier of an observable.
type observablID[N int64 | float64] struct {
	name        string
	description string
	kind        InstrumentKind
	unit        string
	scope       instrumentation.Scope
}

type float64Observable struct {
	metric.Float64Observable
	*observable[float64]

	embedded.Float64ObservableCounter
	embedded.Float64ObservableUpDownCounter
	embedded.Float64ObservableGauge
}

var _ metric.Float64ObservableCounter = float64Observable{}
var _ metric.Float64ObservableUpDownCounter = float64Observable{}
var _ metric.Float64ObservableGauge = float64Observable{}

func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[float64]) float64Observable {
	return float64Observable{
		observable: newObservable(m, kind, name, desc, u, meas),
	}
}

type int64Observable struct {
	metric.Int64Observable
	*observable[int64]

	embedded.Int64ObservableCounter
	embedded.Int64ObservableUpDownCounter
	embedded.Int64ObservableGauge
}

var _ metric.Int64ObservableCounter = int64Observable{}
var _ metric.Int64ObservableUpDownCounter = int64Observable{}
var _ metric.Int64ObservableGauge = int64Observable{}

func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[int64]) int64Observable {
	return int64Observable{
		observable: newObservable(m, kind, name, desc, u, meas),
	}
}

type observable[N int64 | float64] struct {
	metric.Observable
	observablID[N]

	meter    *meter
	measures []aggregate.Measure[N]
}

func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[N]) *observable[N] {
	return &observable[N]{
		observablID: observablID[N]{
			name:        name,
			description: desc,
			kind:        kind,
			unit:        u,
			scope:       m.scope,
		},
		meter:    m,
		measures: meas,
	}
}

// observe records the val for the set of attrs.
func (o *observable[N]) observe(val N, s attribute.Set) {
	for _, in := range o.measures {
		in(context.Background(), val, s)
	}
}

var errEmptyAgg = errors.New("no aggregators for observable instrument")

// registerable returns an error if the observable o should not be registered,
// and nil if it should. An errEmptyAgg error is returned if o is effectively a
// no-op because it does not have any aggregators. Also, an error is returned
// if scope defines a Meter other than the one o was created by.
func (o *observable[N]) registerable(m *meter) error {
	if len(o.measures) == 0 {
		return errEmptyAgg
	}
	if m != o.meter {
		return fmt.Errorf(
			"invalid registration: observable %q from Meter %q, registered with Meter %q",
			o.name,
			o.scope.Name,
			m.scope.Name,
		)
	}
	return nil
}
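The matching rules above treat zero-value Instrument fields as wildcards. A sketch of how that is typically exercised through a View follows; the "route" attribute key is hypothetical.

// Illustrative sketch (not part of this diff): wildcard matching by kind, and
// an attribute allow-list on the resulting Stream.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func routeOnlyHistograms() sdkmetric.View {
	return sdkmetric.NewView(
		sdkmetric.Instrument{Kind: sdkmetric.InstrumentKindHistogram}, // name, unit, scope act as wildcards
		sdkmetric.Stream{
			// Keep only the "route" attribute on every matched histogram.
			AttributeFilter: attribute.NewAllowKeysFilter("route"),
		},
	)
}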
29
vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT.

package metric

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[instrumentKindUndefined-0]
	_ = x[InstrumentKindCounter-1]
	_ = x[InstrumentKindUpDownCounter-2]
	_ = x[InstrumentKindHistogram-3]
	_ = x[InstrumentKindObservableCounter-4]
	_ = x[InstrumentKindObservableUpDownCounter-5]
	_ = x[InstrumentKindObservableGauge-6]
}

const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge"

var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107}

func (i InstrumentKind) String() string {
	if i >= InstrumentKind(len(_InstrumentKind_index)-1) {
		return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
}
133
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// now is used to return the current local time while allowing tests to
// override the default time.Now function.
var now = time.Now

// Measure receives measurements to be aggregated.
type Measure[N int64 | float64] func(context.Context, N, attribute.Set)

// ComputeAggregation stores the aggregate of measurements into dest and
// returns the number of aggregate data-points output.
type ComputeAggregation func(dest *metricdata.Aggregation) int

// Builder builds an aggregate function.
type Builder[N int64 | float64] struct {
	// Temporality is the temporality used for the returned aggregate function.
	//
	// If this is not provided a default of cumulative will be used (except for
	// the last-value aggregate function where delta is the only appropriate
	// temporality).
	Temporality metricdata.Temporality
	// Filter is the attribute filter the aggregate function will use on the
	// input of measurements.
	Filter attribute.Filter
}

func (b Builder[N]) filter(f Measure[N]) Measure[N] {
	if b.Filter != nil {
		fltr := b.Filter // Copy to make it immutable after assignment.
		return func(ctx context.Context, n N, a attribute.Set) {
			fAttr, _ := a.Filter(fltr)
			f(ctx, n, fAttr)
		}
	}
	return f
}

// LastValue returns a last-value aggregate function input and output.
//
// The Builder.Temporality is ignored and delta is always used.
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) {
	// Delta temporality is the only temporality that makes semantic sense for
	// a last-value aggregate.
	lv := newLastValue[N]()

	return b.filter(lv.measure), func(dest *metricdata.Aggregation) int {
		// Ignore if dest is not a metricdata.Gauge. The chance for memory
		// reuse of the DataPoints is missed (better luck next time).
		gData, _ := (*dest).(metricdata.Gauge[N])
		lv.computeAggregation(&gData.DataPoints)
		*dest = gData

		return len(gData.DataPoints)
	}
}

// PrecomputedSum returns a sum aggregate function input and output. The
// arguments passed to the input are expected to be the precomputed sum values.
func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) {
	s := newPrecomputedSum[N](monotonic)
	switch b.Temporality {
	case metricdata.DeltaTemporality:
		return b.filter(s.measure), s.delta
	default:
		return b.filter(s.measure), s.cumulative
	}
}

// Sum returns a sum aggregate function input and output.
func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) {
	s := newSum[N](monotonic)
	switch b.Temporality {
	case metricdata.DeltaTemporality:
		return b.filter(s.measure), s.delta
	default:
		return b.filter(s.measure), s.cumulative
	}
}

// ExplicitBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
	h := newHistogram[N](boundaries, noMinMax, noSum)
	switch b.Temporality {
	case metricdata.DeltaTemporality:
		return b.filter(h.measure), h.delta
	default:
		return b.filter(h.measure), h.cumulative
	}
}

// ExponentialBucketHistogram returns a histogram aggregate function input and
// output.
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) {
	h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum)
	switch b.Temporality {
	case metricdata.DeltaTemporality:
		return b.filter(h.measure), h.delta
	default:
		return b.filter(h.measure), h.cumulative
	}
}

// reset ensures s has capacity and sets its length. If the capacity of s is
// too small, a new slice is returned with the specified capacity and length.
func reset[T any](s []T, length, capacity int) []T {
	if cap(s) < capacity {
		return make([]T, length, capacity)
	}
	return s[:length]
}
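A sketch of how the SDK's pipeline is expected to drive a Builder, pairing the measurement input with the collection output. This is internal-only API, so the snippet is written as if it lived inside the aggregate package; the cumulative-temporality sum is chosen arbitrarily for illustration.

// Illustrative sketch (not part of this diff): wiring a Builder's Sum.
package aggregate

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func exampleSumPipeline(ctx context.Context) int {
	b := Builder[int64]{Temporality: metricdata.CumulativeTemporality}
	measure, collect := b.Sum(true /* monotonic */)

	// Hot path: record measurements.
	measure(ctx, 5, *attribute.EmptySet())
	measure(ctx, 3, *attribute.EmptySet())

	// Collection path: fold the accumulated state into a metricdata.Aggregation.
	var agg metricdata.Aggregation
	return collect(&agg) // number of data points produced
}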
18
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package aggregate provides aggregate types used to compute aggregations and
// cycle the state of metric measurements made by the SDK. These types and
// functionality are meant only for internal SDK use.
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
432
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go
generated
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
const (
|
||||
expoMaxScale = 20
|
||||
expoMinScale = -10
|
||||
|
||||
smallestNonZeroNormalFloat64 = 0x1p-1022
|
||||
|
||||
// These redefine the Math constants with a type, so the compiler won't coerce
|
||||
// them into an int on 32 bit platforms.
|
||||
maxInt64 int64 = math.MaxInt64
|
||||
minInt64 int64 = math.MinInt64
|
||||
)
|
||||
|
||||
// expoHistogramDataPoint is a single data point in an exponential histogram.
|
||||
type expoHistogramDataPoint[N int64 | float64] struct {
|
||||
count uint64
|
||||
min N
|
||||
max N
|
||||
sum N
|
||||
|
||||
maxSize int
|
||||
noMinMax bool
|
||||
noSum bool
|
||||
|
||||
scale int
|
||||
|
||||
posBuckets expoBuckets
|
||||
negBuckets expoBuckets
|
||||
zeroCount uint64
|
||||
}
|
||||
|
||||
func newExpoHistogramDataPoint[N int64 | float64](maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] {
|
||||
f := math.MaxFloat64
|
||||
max := N(f) // if N is int64, max will overflow to -9223372036854775808
|
||||
min := N(-f)
|
||||
if N(maxInt64) > N(f) {
|
||||
max = N(maxInt64)
|
||||
min = N(minInt64)
|
||||
}
|
||||
return &expoHistogramDataPoint[N]{
|
||||
min: max,
|
||||
max: min,
|
||||
maxSize: maxSize,
|
||||
noMinMax: noMinMax,
|
||||
noSum: noSum,
|
||||
scale: maxScale,
|
||||
}
|
||||
}
|
||||
|
||||
// record adds a new measurement to the histogram. It will rescale the buckets if needed.
|
||||
func (p *expoHistogramDataPoint[N]) record(v N) {
|
||||
p.count++
|
||||
|
||||
if !p.noMinMax {
|
||||
if v < p.min {
|
||||
p.min = v
|
||||
}
|
||||
if v > p.max {
|
||||
p.max = v
|
||||
}
|
||||
}
|
||||
if !p.noSum {
|
||||
p.sum += v
|
||||
}
|
||||
|
||||
absV := math.Abs(float64(v))
|
||||
|
||||
if float64(absV) == 0.0 {
|
||||
p.zeroCount++
|
||||
return
|
||||
}
|
||||
|
||||
bin := p.getBin(absV)
|
||||
|
||||
bucket := &p.posBuckets
|
||||
if v < 0 {
|
||||
bucket = &p.negBuckets
|
||||
}
|
||||
|
||||
// If the new bin would make the counts larger than maxScale, we need to
|
||||
// downscale current measurements.
|
||||
if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 {
|
||||
if p.scale-scaleDelta < expoMinScale {
|
||||
// With a scale of -10 there are only two buckets for the whole range of
// float64 values. This can only happen when the max size is 1.
|
||||
otel.Handle(errors.New("exponential histogram scale underflow"))
|
||||
return
|
||||
}
|
||||
// Downscale
|
||||
p.scale -= scaleDelta
|
||||
p.posBuckets.downscale(scaleDelta)
|
||||
p.negBuckets.downscale(scaleDelta)
|
||||
|
||||
bin = p.getBin(absV)
|
||||
}
|
||||
|
||||
bucket.record(bin)
|
||||
}
|
||||
|
||||
// getBin returns the bin v should be recorded into.
|
||||
func (p *expoHistogramDataPoint[N]) getBin(v float64) int {
|
||||
frac, exp := math.Frexp(v)
|
||||
if p.scale <= 0 {
|
||||
// Because the fraction returned by Frexp is in [0.5, 1), exp is always one
// power of two higher than we want.
|
||||
correction := 1
|
||||
if frac == .5 {
|
||||
// If v is an exact power of two the frac will be .5 and the exp
|
||||
// will be one higher than we want.
|
||||
correction = 2
|
||||
}
|
||||
return (exp - correction) >> (-p.scale)
|
||||
}
|
||||
return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
|
||||
}
|
||||
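For scale <= 0 the mapping above reduces to shifting the binary exponent of the value. A minimal, self-contained sketch of that arithmetic follows; the function name is hypothetical and independent of the SDK's internal types.

package main

import (
	"fmt"
	"math"
)

// indexAtScale returns the exponential bucket index of v for scale <= 0,
// mirroring the Frexp-based branch of getBin above.
func indexAtScale(v float64, scale int) int {
	frac, exp := math.Frexp(v) // v = frac * 2^exp with frac in [0.5, 1)
	correction := 1
	if frac == 0.5 {
		// Exact powers of two would land on a bucket boundary; pull them
		// down into the lower bucket.
		correction = 2
	}
	return (exp - correction) >> uint(-scale)
}

func main() {
	// At scale 0 each bucket covers one power of two: (2^i, 2^(i+1)].
	fmt.Println(indexAtScale(4, 0)) // 1  -> bucket (2, 4]
	fmt.Println(indexAtScale(5, 0)) // 2  -> bucket (4, 8]
	fmt.Println(indexAtScale(1, 0)) // -1 -> bucket (0.5, 1]
}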
|
||||
// scaleFactors are constants used in calculating the logarithm index. They are
|
||||
// equivalent to 2^index/log(2).
|
||||
var scaleFactors = [21]float64{
|
||||
math.Ldexp(math.Log2E, 0),
|
||||
math.Ldexp(math.Log2E, 1),
|
||||
math.Ldexp(math.Log2E, 2),
|
||||
math.Ldexp(math.Log2E, 3),
|
||||
math.Ldexp(math.Log2E, 4),
|
||||
math.Ldexp(math.Log2E, 5),
|
||||
math.Ldexp(math.Log2E, 6),
|
||||
math.Ldexp(math.Log2E, 7),
|
||||
math.Ldexp(math.Log2E, 8),
|
||||
math.Ldexp(math.Log2E, 9),
|
||||
math.Ldexp(math.Log2E, 10),
|
||||
math.Ldexp(math.Log2E, 11),
|
||||
math.Ldexp(math.Log2E, 12),
|
||||
math.Ldexp(math.Log2E, 13),
|
||||
math.Ldexp(math.Log2E, 14),
|
||||
math.Ldexp(math.Log2E, 15),
|
||||
math.Ldexp(math.Log2E, 16),
|
||||
math.Ldexp(math.Log2E, 17),
|
||||
math.Ldexp(math.Log2E, 18),
|
||||
math.Ldexp(math.Log2E, 19),
|
||||
math.Ldexp(math.Log2E, 20),
|
||||
}
|
||||
|
||||
// scaleChange returns the magnitude of the scale change needed to fit bin in
// the bucket. If no scale change is needed, 0 is returned.
|
||||
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
|
||||
if length == 0 {
|
||||
// No need to rescale if there are no buckets.
|
||||
return 0
|
||||
}
|
||||
|
||||
low := startBin
|
||||
high := bin
|
||||
if startBin >= bin {
|
||||
low = bin
|
||||
high = startBin + length - 1
|
||||
}
|
||||
|
||||
count := 0
|
||||
for high-low >= p.maxSize {
|
||||
low = low >> 1
|
||||
high = high >> 1
|
||||
count++
|
||||
if count > expoMaxScale-expoMinScale {
|
||||
return count
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// expoBuckets is a set of buckets in an exponential histogram.
|
||||
type expoBuckets struct {
|
||||
startBin int
|
||||
counts []uint64
|
||||
}
|
||||
|
||||
// record increments the count for the given bin, and expands the buckets if needed.
|
||||
// Size changes must be done before calling this function.
|
||||
func (b *expoBuckets) record(bin int) {
|
||||
if len(b.counts) == 0 {
|
||||
b.counts = []uint64{1}
|
||||
b.startBin = bin
|
||||
return
|
||||
}
|
||||
|
||||
endBin := b.startBin + len(b.counts) - 1
|
||||
|
||||
// if the new bin is inside the current range
|
||||
if bin >= b.startBin && bin <= endBin {
|
||||
b.counts[bin-b.startBin]++
|
||||
return
|
||||
}
|
||||
// if the new bin is before the current start, add space to the front of the counts
|
||||
if bin < b.startBin {
|
||||
origLen := len(b.counts)
|
||||
newLength := endBin - bin + 1
|
||||
shift := b.startBin - bin
|
||||
|
||||
if newLength > cap(b.counts) {
|
||||
b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
|
||||
}
|
||||
|
||||
copy(b.counts[shift:origLen+shift], b.counts[:])
|
||||
b.counts = b.counts[:newLength]
|
||||
for i := 1; i < shift; i++ {
|
||||
b.counts[i] = 0
|
||||
}
|
||||
b.startBin = bin
|
||||
b.counts[0] = 1
|
||||
return
|
||||
}
|
||||
// if the new bin is after the current end, add space to the end of the counts
|
||||
if bin > endBin {
|
||||
if bin-b.startBin < cap(b.counts) {
|
||||
b.counts = b.counts[:bin-b.startBin+1]
|
||||
for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
|
||||
b.counts[i] = 0
|
||||
}
|
||||
b.counts[bin-b.startBin] = 1
|
||||
return
|
||||
}
|
||||
|
||||
end := make([]uint64, bin-b.startBin-len(b.counts)+1)
|
||||
b.counts = append(b.counts, end...)
|
||||
b.counts[bin-b.startBin] = 1
|
||||
}
|
||||
}
|
||||
|
||||
// downscale shrinks the buckets by a factor of 2^delta. It sums counts into
// the correct lower-resolution bucket.
|
||||
func (b *expoBuckets) downscale(delta int) {
|
||||
// Example
|
||||
// delta = 2
|
||||
// Original offset: -6
|
||||
// Counts: [ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
|
||||
// bins: -6 -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
|
||||
// new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
|
||||
// new Offset: -2
|
||||
// new Counts: [4, 14, 30, 10]
|
||||
|
||||
if len(b.counts) <= 1 || delta < 1 {
|
||||
b.startBin = b.startBin >> delta
|
||||
return
|
||||
}
|
||||
|
||||
steps := 1 << delta
|
||||
offset := b.startBin % steps
|
||||
offset = (offset + steps) % steps // to make offset positive
|
||||
for i := 1; i < len(b.counts); i++ {
|
||||
idx := i + offset
|
||||
if idx%steps == 0 {
|
||||
b.counts[idx/steps] = b.counts[i]
|
||||
continue
|
||||
}
|
||||
b.counts[idx/steps] += b.counts[i]
|
||||
}
|
||||
|
||||
lastIdx := (len(b.counts) - 1 + offset) / steps
|
||||
b.counts = b.counts[:lastIdx+1]
|
||||
b.startBin = b.startBin >> delta
|
||||
}
|
||||
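A standalone sketch of the same merge, reproducing the worked example in the comment above (offset -6, delta 2); the helper below is hypothetical and only mirrors the index arithmetic, not the in-place update done by downscale.

package main

import "fmt"

// merge collapses counts recorded starting at startBin into buckets that are
// 2^delta times wider, the same regrouping downscale performs in place.
func merge(startBin int, counts []uint64, delta int) (int, []uint64) {
	newStart := startBin >> delta
	newEnd := (startBin + len(counts) - 1) >> delta
	merged := make([]uint64, newEnd-newStart+1)
	for i, c := range counts {
		merged[((startBin+i)>>delta)-newStart] += c
	}
	return newStart, merged
}

func main() {
	start, counts := merge(-6, []uint64{3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 2)
	fmt.Println(start, counts) // -2 [4 14 30 10]
}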
|
||||
// newExponentialHistogram returns an Aggregator that summarizes a set of
|
||||
// measurements as an exponential histogram. Each histogram is scoped by attributes
|
||||
// and the aggregation cycle the measurements were made in.
|
||||
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] {
|
||||
return &expoHistogram[N]{
|
||||
noSum: noSum,
|
||||
noMinMax: noMinMax,
|
||||
maxSize: int(maxSize),
|
||||
maxScale: int(maxScale),
|
||||
|
||||
values: make(map[attribute.Set]*expoHistogramDataPoint[N]),
|
||||
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// expoHistogram summarizes a set of measurements as a histogram with
// exponentially defined buckets.
|
||||
type expoHistogram[N int64 | float64] struct {
|
||||
noSum bool
|
||||
noMinMax bool
|
||||
maxSize int
|
||||
maxScale int
|
||||
|
||||
values map[attribute.Set]*expoHistogramDataPoint[N]
|
||||
valuesMu sync.Mutex
|
||||
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) {
|
||||
// Ignore NaN and infinity.
|
||||
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) {
|
||||
return
|
||||
}
|
||||
|
||||
e.valuesMu.Lock()
|
||||
defer e.valuesMu.Unlock()
|
||||
|
||||
v, ok := e.values[attr]
|
||||
if !ok {
|
||||
v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum)
|
||||
e.values[attr] = v
|
||||
}
|
||||
v.record(value)
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
|
||||
// In that case, use the zero-value h and hope for better alignment next cycle.
|
||||
h, _ := (*dest).(metricdata.ExponentialHistogram[N])
|
||||
h.Temporality = metricdata.DeltaTemporality
|
||||
|
||||
e.valuesMu.Lock()
|
||||
defer e.valuesMu.Unlock()
|
||||
|
||||
n := len(e.values)
|
||||
hDPts := reset(h.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for a, b := range e.values {
|
||||
hDPts[i].Attributes = a
|
||||
hDPts[i].StartTime = e.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = b.count
|
||||
hDPts[i].Scale = int32(b.scale)
|
||||
hDPts[i].ZeroCount = b.zeroCount
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
|
||||
copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
|
||||
|
||||
if !e.noSum {
|
||||
hDPts[i].Sum = b.sum
|
||||
}
|
||||
if !e.noMinMax {
|
||||
hDPts[i].Min = metricdata.NewExtrema(b.min)
|
||||
hDPts[i].Max = metricdata.NewExtrema(b.max)
|
||||
}
|
||||
|
||||
delete(e.values, a)
|
||||
i++
|
||||
}
|
||||
e.start = t
|
||||
h.DataPoints = hDPts
|
||||
*dest = h
|
||||
return n
|
||||
}
|
||||
|
||||
func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed.
|
||||
// In that case, use the zero-value h and hope for better alignment next cycle.
|
||||
h, _ := (*dest).(metricdata.ExponentialHistogram[N])
|
||||
h.Temporality = metricdata.CumulativeTemporality
|
||||
|
||||
e.valuesMu.Lock()
|
||||
defer e.valuesMu.Unlock()
|
||||
|
||||
n := len(e.values)
|
||||
hDPts := reset(h.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for a, b := range e.values {
|
||||
hDPts[i].Attributes = a
|
||||
hDPts[i].StartTime = e.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = b.count
|
||||
hDPts[i].Scale = int32(b.scale)
|
||||
hDPts[i].ZeroCount = b.zeroCount
|
||||
hDPts[i].ZeroThreshold = 0.0
|
||||
|
||||
hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin)
|
||||
hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts))
|
||||
copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts)
|
||||
|
||||
hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin)
|
||||
hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts))
|
||||
|
||||
if !e.noSum {
|
||||
hDPts[i].Sum = b.sum
|
||||
}
|
||||
if !e.noMinMax {
|
||||
hDPts[i].Min = metricdata.NewExtrema(b.min)
|
||||
hDPts[i].Max = metricdata.NewExtrema(b.max)
|
||||
}
|
||||
|
||||
i++
|
||||
// TODO (#3006): This will use an unbounded amount of memory if there
|
||||
// are unbounded number of attribute sets being aggregated. Attribute
|
||||
// sets that become "stale" need to be forgotten so this will not
|
||||
// overload the system.
|
||||
}
|
||||
|
||||
h.DataPoints = hDPts
|
||||
*dest = h
|
||||
return n
|
||||
}
|
231
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
type buckets[N int64 | float64] struct {
|
||||
counts []uint64
|
||||
count uint64
|
||||
total N
|
||||
min, max N
|
||||
}
|
||||
|
||||
// newBuckets returns buckets with n bins.
|
||||
func newBuckets[N int64 | float64](n int) *buckets[N] {
|
||||
return &buckets[N]{counts: make([]uint64, n)}
|
||||
}
|
||||
|
||||
func (b *buckets[N]) sum(value N) { b.total += value }
|
||||
|
||||
func (b *buckets[N]) bin(idx int, value N) {
|
||||
b.counts[idx]++
|
||||
b.count++
|
||||
if value < b.min {
|
||||
b.min = value
|
||||
} else if value > b.max {
|
||||
b.max = value
|
||||
}
|
||||
}
|
||||
|
||||
// histValues summarizes a set of measurements as a histogram with
// explicitly defined buckets.
|
||||
type histValues[N int64 | float64] struct {
|
||||
noSum bool
|
||||
bounds []float64
|
||||
|
||||
values map[attribute.Set]*buckets[N]
|
||||
valuesMu sync.Mutex
|
||||
}
|
||||
|
||||
func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] {
|
||||
// Keeping all buckets correctly associated with the passed boundaries is
// ultimately this type's responsibility. Make a copy here so we can always
// guarantee this or, in the case of failure, have complete control over the
// fix.
|
||||
b := make([]float64, len(bounds))
|
||||
copy(b, bounds)
|
||||
sort.Float64s(b)
|
||||
return &histValues[N]{
|
||||
noSum: noSum,
|
||||
bounds: b,
|
||||
values: make(map[attribute.Set]*buckets[N]),
|
||||
}
|
||||
}
|
||||
|
||||
// measure records the measurement value, scoped by attr, and aggregates it
// into a histogram.
|
||||
func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) {
|
||||
// This search will return an index in the range [0, len(s.bounds)], where
|
||||
// it will return len(s.bounds) if value is greater than the last element
|
||||
// of s.bounds. This aligns with the buckets in that the length of buckets
|
||||
// is len(s.bounds)+1, with the last bucket representing:
|
||||
// (s.bounds[len(s.bounds)-1], +∞).
|
||||
idx := sort.SearchFloat64s(s.bounds, float64(value))
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
b, ok := s.values[attr]
|
||||
if !ok {
|
||||
// N+1 buckets. For example:
|
||||
//
|
||||
// bounds = [0, 5, 10]
|
||||
//
|
||||
// Then,
|
||||
//
|
||||
// buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞)
|
||||
b = newBuckets[N](len(s.bounds) + 1)
|
||||
// Ensure min and max are recorded values (not zero), for new buckets.
|
||||
b.min, b.max = value, value
|
||||
s.values[attr] = b
|
||||
}
|
||||
b.bin(idx, value)
|
||||
if !s.noSum {
|
||||
b.sum(value)
|
||||
}
|
||||
}
|
||||
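The bucket index above follows directly from the sort.SearchFloat64s convention; a short standalone illustration of which bucket a value lands in for bounds [0, 5, 10]:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Buckets: (-inf, 0], (0, 5], (5, 10], (10, +inf).
	bounds := []float64{0, 5, 10}
	for _, v := range []float64{-1, 0, 3, 5, 7, 42} {
		// SearchFloat64s returns the first index i with bounds[i] >= v,
		// or len(bounds) when v is greater than every boundary.
		fmt.Println(v, "-> bucket", sort.SearchFloat64s(bounds, v))
	}
	// -1 -> bucket 0, 0 -> bucket 0, 3 -> bucket 1,
	// 5 -> bucket 1, 7 -> bucket 2, 42 -> bucket 3.
}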
|
||||
// newHistogram returns an Aggregator that summarizes a set of measurements as
// a histogram.
|
||||
func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] {
|
||||
return &histogram[N]{
|
||||
histValues: newHistValues[N](boundaries, noSum),
|
||||
noMinMax: noMinMax,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// histogram summarizes a set of measurements as a histogram with explicitly
// defined buckets.
|
||||
type histogram[N int64 | float64] struct {
|
||||
*histValues[N]
|
||||
|
||||
noMinMax bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *histogram[N]) delta(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
|
||||
// case, use the zero-value h and hope for better alignment next cycle.
|
||||
h, _ := (*dest).(metricdata.Histogram[N])
|
||||
h.Temporality = metricdata.DeltaTemporality
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
// Do not allow modification of our copy of bounds.
|
||||
bounds := make([]float64, len(s.bounds))
|
||||
copy(bounds, s.bounds)
|
||||
|
||||
n := len(s.values)
|
||||
hDPts := reset(h.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for a, b := range s.values {
|
||||
hDPts[i].Attributes = a
|
||||
hDPts[i].StartTime = s.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = b.count
|
||||
hDPts[i].Bounds = bounds
|
||||
hDPts[i].BucketCounts = b.counts
|
||||
|
||||
if !s.noSum {
|
||||
hDPts[i].Sum = b.total
|
||||
}
|
||||
|
||||
if !s.noMinMax {
|
||||
hDPts[i].Min = metricdata.NewExtrema(b.min)
|
||||
hDPts[i].Max = metricdata.NewExtrema(b.max)
|
||||
}
|
||||
|
||||
// Unused attribute sets do not report.
|
||||
delete(s.values, a)
|
||||
i++
|
||||
}
|
||||
// The delta collection cycle resets.
|
||||
s.start = t
|
||||
|
||||
h.DataPoints = hDPts
|
||||
*dest = h
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Histogram, memory reuse is missed. In that
|
||||
// case, use the zero-value h and hope for better alignment next cycle.
|
||||
h, _ := (*dest).(metricdata.Histogram[N])
|
||||
h.Temporality = metricdata.CumulativeTemporality
|
||||
|
||||
s.valuesMu.Lock()
|
||||
defer s.valuesMu.Unlock()
|
||||
|
||||
// Do not allow modification of our copy of bounds.
|
||||
bounds := make([]float64, len(s.bounds))
|
||||
copy(bounds, s.bounds)
|
||||
|
||||
n := len(s.values)
|
||||
hDPts := reset(h.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for a, b := range s.values {
|
||||
// The HistogramDataPoint field values returned need to be copies of
|
||||
// the buckets value as we will keep updating them.
|
||||
//
|
||||
// TODO (#3047): Making copies for bounds and counts incurs a large
|
||||
// memory allocation footprint. Alternatives should be explored.
|
||||
counts := make([]uint64, len(b.counts))
|
||||
copy(counts, b.counts)
|
||||
|
||||
hDPts[i].Attributes = a
|
||||
hDPts[i].StartTime = s.start
|
||||
hDPts[i].Time = t
|
||||
hDPts[i].Count = b.count
|
||||
hDPts[i].Bounds = bounds
|
||||
hDPts[i].BucketCounts = counts
|
||||
|
||||
if !s.noSum {
|
||||
hDPts[i].Sum = b.total
|
||||
}
|
||||
|
||||
if !s.noMinMax {
|
||||
hDPts[i].Min = metricdata.NewExtrema(b.min)
|
||||
hDPts[i].Max = metricdata.NewExtrema(b.max)
|
||||
}
|
||||
i++
|
||||
// TODO (#3006): This will use an unbounded amount of memory if there
|
||||
// are unbounded number of attribute sets being aggregated. Attribute
|
||||
// sets that become "stale" need to be forgotten so this will not
|
||||
// overload the system.
|
||||
}
|
||||
|
||||
h.DataPoints = hDPts
|
||||
*dest = h
|
||||
|
||||
return n
|
||||
}
|
68
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go
generated
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// datapoint is timestamped measurement data.
|
||||
type datapoint[N int64 | float64] struct {
|
||||
timestamp time.Time
|
||||
value N
|
||||
}
|
||||
|
||||
func newLastValue[N int64 | float64]() *lastValue[N] {
|
||||
return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])}
|
||||
}
|
||||
|
||||
// lastValue summarizes a set of measurements as the last one made.
|
||||
type lastValue[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
|
||||
values map[attribute.Set]datapoint[N]
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) {
|
||||
d := datapoint[N]{timestamp: now(), value: value}
|
||||
s.Lock()
|
||||
s.values[attr] = d
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
n := len(s.values)
|
||||
*dest = reset(*dest, n, n)
|
||||
|
||||
var i int
|
||||
for a, v := range s.values {
|
||||
(*dest)[i].Attributes = a
|
||||
// The event time is the only meaningful timestamp; StartTime is
// ignored.
|
||||
(*dest)[i].Time = v.timestamp
|
||||
(*dest)[i].Value = v.value
|
||||
// Do not report stale values.
|
||||
delete(s.values, a)
|
||||
i++
|
||||
}
|
||||
}
|
222
vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// valueMap is the storage for sums.
|
||||
type valueMap[N int64 | float64] struct {
|
||||
sync.Mutex
|
||||
values map[attribute.Set]N
|
||||
}
|
||||
|
||||
func newValueMap[N int64 | float64]() *valueMap[N] {
|
||||
return &valueMap[N]{values: make(map[attribute.Set]N)}
|
||||
}
|
||||
|
||||
func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) {
|
||||
s.Lock()
|
||||
s.values[attr] += value
|
||||
s.Unlock()
|
||||
}
|
||||
|
||||
// newSum returns an aggregator that summarizes a set of measurements as their
|
||||
// arithmetic sum. Each sum is scoped by attributes and the aggregation cycle
|
||||
// the measurements were made in.
|
||||
func newSum[N int64 | float64](monotonic bool) *sum[N] {
|
||||
return &sum[N]{
|
||||
valueMap: newValueMap[N](),
|
||||
monotonic: monotonic,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// sum summarizes a set of measurements made as their arithmetic sum.
|
||||
type sum[N int64 | float64] struct {
|
||||
*valueMap[N]
|
||||
|
||||
monotonic bool
|
||||
start time.Time
|
||||
}
|
||||
|
||||
func (s *sum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
// use the zero-value sData and hope for better alignment next cycle.
|
||||
sData, _ := (*dest).(metricdata.Sum[N])
|
||||
sData.Temporality = metricdata.DeltaTemporality
|
||||
sData.IsMonotonic = s.monotonic
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
n := len(s.values)
|
||||
dPts := reset(sData.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for attr, value := range s.values {
|
||||
dPts[i].Attributes = attr
|
||||
dPts[i].StartTime = s.start
|
||||
dPts[i].Time = t
|
||||
dPts[i].Value = value
|
||||
// Do not report stale values.
|
||||
delete(s.values, attr)
|
||||
i++
|
||||
}
|
||||
// The delta collection cycle resets.
|
||||
s.start = t
|
||||
|
||||
sData.DataPoints = dPts
|
||||
*dest = sData
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
// use the zero-value sData and hope for better alignment next cycle.
|
||||
sData, _ := (*dest).(metricdata.Sum[N])
|
||||
sData.Temporality = metricdata.CumulativeTemporality
|
||||
sData.IsMonotonic = s.monotonic
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
n := len(s.values)
|
||||
dPts := reset(sData.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for attr, value := range s.values {
|
||||
dPts[i].Attributes = attr
|
||||
dPts[i].StartTime = s.start
|
||||
dPts[i].Time = t
|
||||
dPts[i].Value = value
|
||||
// TODO (#3006): This will use an unbounded amount of memory if there
|
||||
// are unbounded number of attribute sets being aggregated. Attribute
|
||||
// sets that become "stale" need to be forgotten so this will not
|
||||
// overload the system.
|
||||
i++
|
||||
}
|
||||
|
||||
sData.DataPoints = dPts
|
||||
*dest = sData
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// newPrecomputedSum returns an aggregator that summarizes a set of
|
||||
// observations as their arithmetic sum. Each sum is scoped by attributes and
|
||||
// the aggregation cycle the measurements were made in.
|
||||
func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] {
|
||||
return &precomputedSum[N]{
|
||||
valueMap: newValueMap[N](),
|
||||
monotonic: monotonic,
|
||||
start: now(),
|
||||
}
|
||||
}
|
||||
|
||||
// precomputedSum summarizes a set of observations as their arithmetic sum.
|
||||
type precomputedSum[N int64 | float64] struct {
|
||||
*valueMap[N]
|
||||
|
||||
monotonic bool
|
||||
start time.Time
|
||||
|
||||
reported map[attribute.Set]N
|
||||
}
|
||||
|
||||
func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
newReported := make(map[attribute.Set]N)
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
// use the zero-value sData and hope for better alignment next cycle.
|
||||
sData, _ := (*dest).(metricdata.Sum[N])
|
||||
sData.Temporality = metricdata.DeltaTemporality
|
||||
sData.IsMonotonic = s.monotonic
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
n := len(s.values)
|
||||
dPts := reset(sData.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for attr, value := range s.values {
|
||||
delta := value - s.reported[attr]
|
||||
|
||||
dPts[i].Attributes = attr
|
||||
dPts[i].StartTime = s.start
|
||||
dPts[i].Time = t
|
||||
dPts[i].Value = delta
|
||||
|
||||
newReported[attr] = value
|
||||
// Unused attribute sets do not report.
|
||||
delete(s.values, attr)
|
||||
i++
|
||||
}
|
||||
// Unused attribute sets are forgotten.
|
||||
s.reported = newReported
|
||||
// The delta collection cycle resets.
|
||||
s.start = t
|
||||
|
||||
sData.DataPoints = dPts
|
||||
*dest = sData
|
||||
|
||||
return n
|
||||
}
|
||||
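A self-contained sketch of the delta computation above: each cycle reports the difference between the current precomputed (cumulative) value and what was reported in the previous cycle, then forgets attribute sets that were not observed. The names below are hypothetical.

package main

import "fmt"

func main() {
	reported := map[string]int64{}

	// collectDelta mimics precomputedSum.delta for a single attribute key space.
	collectDelta := func(cumulative map[string]int64) map[string]int64 {
		deltas := make(map[string]int64, len(cumulative))
		newReported := make(map[string]int64, len(cumulative))
		for attr, value := range cumulative {
			deltas[attr] = value - reported[attr] // unseen keys read as zero
			newReported[attr] = value
		}
		reported = newReported // unused attribute sets are forgotten
		return deltas
	}

	fmt.Println(collectDelta(map[string]int64{"ok": 10})) // map[ok:10]
	fmt.Println(collectDelta(map[string]int64{"ok": 25})) // map[ok:15]
}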
|
||||
func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int {
|
||||
t := now()
|
||||
|
||||
// If *dest is not a metricdata.Sum, memory reuse is missed. In that case,
|
||||
// use the zero-value sData and hope for better alignment next cycle.
|
||||
sData, _ := (*dest).(metricdata.Sum[N])
|
||||
sData.Temporality = metricdata.CumulativeTemporality
|
||||
sData.IsMonotonic = s.monotonic
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
n := len(s.values)
|
||||
dPts := reset(sData.DataPoints, n, n)
|
||||
|
||||
var i int
|
||||
for attr, value := range s.values {
|
||||
dPts[i].Attributes = attr
|
||||
dPts[i].StartTime = s.start
|
||||
dPts[i].Time = t
|
||||
dPts[i].Value = value
|
||||
|
||||
// Unused attribute sets do not report.
|
||||
delete(s.values, attr)
|
||||
i++
|
||||
}
|
||||
|
||||
sData.DataPoints = dPts
|
||||
*dest = sData
|
||||
|
||||
return n
|
||||
}
|
24
vendor/go.opentelemetry.io/otel/sdk/metric/internal/reuse_slice.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"

// ReuseSlice returns a zeroed view of slice if its capacity is greater than or
// equal to n. Otherwise, it returns a new []T with capacity equal to n.
func ReuseSlice[T any](slice []T, n int) []T {
	if cap(slice) >= n {
		return slice[:n]
	}
	return make([]T, n)
}
|
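A small illustration of the reuse semantics: when the existing slice already has enough capacity it is simply re-sliced and its backing array is reused, otherwise a new slice is allocated. The local copy below only restates the helper for a runnable demonstration.

package main

import "fmt"

// reuseSlice restates ReuseSlice above so the example is self-contained.
func reuseSlice[T any](slice []T, n int) []T {
	if cap(slice) >= n {
		return slice[:n]
	}
	return make([]T, n)
}

func main() {
	s := make([]int, 0, 8)
	a := reuseSlice(s, 4)
	fmt.Println(len(a), cap(a)) // 4 8: backing array reused, no allocation
	b := reuseSlice(a, 16)
	fmt.Println(len(b), cap(b)) // 16 16: capacity too small, new slice allocated
}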
214
vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// ManualReader is a simple Reader that allows an application to
|
||||
// read metrics on demand.
|
||||
type ManualReader struct {
|
||||
sdkProducer atomic.Value
|
||||
shutdownOnce sync.Once
|
||||
|
||||
mu sync.Mutex
|
||||
isShutdown bool
|
||||
externalProducers atomic.Value
|
||||
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
}
|
||||
|
||||
// Compile-time check that ManualReader implements Reader and is comparable.
|
||||
var _ = map[Reader]struct{}{&ManualReader{}: {}}
|
||||
|
||||
// NewManualReader returns a Reader which is directly called to collect metrics.
|
||||
func NewManualReader(opts ...ManualReaderOption) *ManualReader {
|
||||
cfg := newManualReaderConfig(opts)
|
||||
r := &ManualReader{
|
||||
temporalitySelector: cfg.temporalitySelector,
|
||||
aggregationSelector: cfg.aggregationSelector,
|
||||
}
|
||||
r.externalProducers.Store(cfg.producers)
|
||||
return r
|
||||
}
|
||||
|
||||
// register stores the sdkProducer which enables the caller
|
||||
// to read metrics from the SDK on demand.
|
||||
func (mr *ManualReader) register(p sdkProducer) {
|
||||
// Only register once. If producer is already set, do nothing.
|
||||
if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
|
||||
msg := "did not register manual reader"
|
||||
global.Error(errDuplicateRegister, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality {
|
||||
return mr.temporalitySelector(kind)
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return mr.aggregationSelector(kind)
|
||||
}
|
||||
|
||||
// Shutdown closes any connections and frees any resources used by the reader.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mr *ManualReader) Shutdown(context.Context) error {
|
||||
err := ErrReaderShutdown
|
||||
mr.shutdownOnce.Do(func() {
|
||||
// Any future call to Collect will now return ErrReaderShutdown.
|
||||
mr.sdkProducer.Store(produceHolder{
|
||||
produce: shutdownProducer{}.produce,
|
||||
})
|
||||
mr.mu.Lock()
|
||||
defer mr.mu.Unlock()
|
||||
mr.isShutdown = true
|
||||
// release references to Producer(s)
|
||||
mr.externalProducers.Store([]Producer{})
|
||||
err = nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect gathers all metric data related to the Reader from
|
||||
// the SDK and other Producers and stores the result in rm.
|
||||
//
|
||||
// Collect will return an error if called after shutdown.
|
||||
// Collect will return an error if rm is a nil ResourceMetrics.
|
||||
// Collect will return an error if the context's Done channel is closed.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||
if rm == nil {
|
||||
return errors.New("manual reader: *metricdata.ResourceMetrics is nil")
|
||||
}
|
||||
p := mr.sdkProducer.Load()
|
||||
if p == nil {
|
||||
return ErrReaderNotRegistered
|
||||
}
|
||||
|
||||
ph, ok := p.(produceHolder)
|
||||
if !ok {
|
||||
// The atomic.Value is entirely in the ManualReader's control so this
// should never happen. In the unforeseen case that it does, return an
// error instead of panicking so a user's code does not halt in the
// process.
|
||||
err := fmt.Errorf("manual reader: invalid producer: %T", p)
|
||||
return err
|
||||
}
|
||||
|
||||
err := ph.produce(ctx, rm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var errs []error
|
||||
for _, producer := range mr.externalProducers.Load().([]Producer) {
|
||||
externalMetrics, err := producer.Produce(ctx)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
|
||||
}
|
||||
|
||||
global.Debug("ManualReader collection", "Data", rm)
|
||||
|
||||
return unifyErrors(errs)
|
||||
}
|
||||
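A hedged usage sketch of Collect through the public SDK surface; the meter name and instrument below are illustrative, and the wiring assumes the usual NewMeterProvider/WithReader constructors of this SDK.

package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()

	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	counter, err := provider.Meter("example").Int64Counter("requests")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 3)

	// Collect on demand; rm must be non-nil or Collect returns an error.
	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		panic(err)
	}
	fmt.Println(len(rm.ScopeMetrics)) // one scope containing the counter
}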
|
||||
// MarshalLog returns logging data about the ManualReader.
|
||||
func (r *ManualReader) MarshalLog() interface{} {
|
||||
r.mu.Lock()
|
||||
down := r.isShutdown
|
||||
r.mu.Unlock()
|
||||
return struct {
|
||||
Type string
|
||||
Registered bool
|
||||
Shutdown bool
|
||||
}{
|
||||
Type: "ManualReader",
|
||||
Registered: r.sdkProducer.Load() != nil,
|
||||
Shutdown: down,
|
||||
}
|
||||
}
|
||||
|
||||
// manualReaderConfig contains configuration options for a ManualReader.
|
||||
type manualReaderConfig struct {
|
||||
temporalitySelector TemporalitySelector
|
||||
aggregationSelector AggregationSelector
|
||||
producers []Producer
|
||||
}
|
||||
|
||||
// newManualReaderConfig returns a manualReaderConfig configured with options.
|
||||
func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
|
||||
cfg := manualReaderConfig{
|
||||
temporalitySelector: DefaultTemporalitySelector,
|
||||
aggregationSelector: DefaultAggregationSelector,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
cfg = opt.applyManual(cfg)
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// ManualReaderOption applies a configuration option value to a ManualReader.
|
||||
type ManualReaderOption interface {
|
||||
applyManual(manualReaderConfig) manualReaderConfig
|
||||
}
|
||||
|
||||
// WithTemporalitySelector sets the TemporalitySelector a reader will use to
|
||||
// determine the Temporality of an instrument based on its kind. If this
|
||||
// option is not used, the reader will use the DefaultTemporalitySelector.
|
||||
func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption {
|
||||
return temporalitySelectorOption{selector: selector}
|
||||
}
|
||||
|
||||
type temporalitySelectorOption struct {
|
||||
selector func(instrument InstrumentKind) metricdata.Temporality
|
||||
}
|
||||
|
||||
// applyManual returns a manualReaderConfig with option applied.
|
||||
func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig {
|
||||
mrc.temporalitySelector = t.selector
|
||||
return mrc
|
||||
}
|
||||
|
||||
// WithAggregationSelector sets the AggregationSelector a reader will use to
|
||||
// determine the aggregation to use for an instrument based on its kind. If
|
||||
// this option is not used, the reader will use the DefaultAggregationSelector
|
||||
// or the aggregation explicitly passed for a view matching an instrument.
|
||||
func WithAggregationSelector(selector AggregationSelector) ManualReaderOption {
|
||||
return aggregationSelectorOption{selector: selector}
|
||||
}
|
||||
|
||||
type aggregationSelectorOption struct {
|
||||
selector AggregationSelector
|
||||
}
|
||||
|
||||
// applyManual returns a manualReaderConfig with option applied.
|
||||
func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig {
|
||||
c.aggregationSelector = t.selector
|
||||
return c
|
||||
}
|
549
vendor/go.opentelemetry.io/otel/sdk/metric/meter.go
generated
vendored
Normal file
@@ -0,0 +1,549 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInstrumentName indicates the created instrument has an invalid name.
|
||||
// Valid names must consist of 255 or fewer characters drawn from alphanumerics, '_', '.', '-', and '/', and must start with a letter.
|
||||
ErrInstrumentName = errors.New("invalid instrument name")
|
||||
)
|
||||
|
||||
// meter handles the creation and coordination of all metric instruments. A
|
||||
// meter represents a single instrumentation scope; all metric telemetry
|
||||
// produced by an instrumentation scope will use metric instruments from a
|
||||
// single meter.
|
||||
type meter struct {
|
||||
embedded.Meter
|
||||
|
||||
scope instrumentation.Scope
|
||||
pipes pipelines
|
||||
|
||||
int64Resolver resolver[int64]
|
||||
float64Resolver resolver[float64]
|
||||
}
|
||||
|
||||
func newMeter(s instrumentation.Scope, p pipelines) *meter {
|
||||
// viewCache ensures that instrument conflicts this meter is asked to create,
// including number conflicts, are logged to the user.
|
||||
var viewCache cache[string, instID]
|
||||
|
||||
return &meter{
|
||||
scope: s,
|
||||
pipes: p,
|
||||
int64Resolver: newResolver[int64](p, &viewCache),
|
||||
float64Resolver: newResolver[float64](p, &viewCache),
|
||||
}
|
||||
}
|
||||
|
||||
// Compile-time check meter implements metric.Meter.
|
||||
var _ metric.Meter = (*meter)(nil)
|
||||
|
||||
// Int64Counter returns a new instrument identified by name and configured with
|
||||
// options. The instrument is used to synchronously record increasing int64
|
||||
// measurements during a computational operation.
|
||||
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
|
||||
cfg := metric.NewInt64CounterConfig(options...)
|
||||
const kind = InstrumentKindCounter
|
||||
p := int64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Int64UpDownCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// int64 measurements during a computational operation.
|
||||
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
cfg := metric.NewInt64UpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindUpDownCounter
|
||||
p := int64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Int64Histogram returns a new instrument identified by name and configured
|
||||
// with options. The instrument is used to synchronously record the
|
||||
// distribution of int64 measurements during a computational operation.
|
||||
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
|
||||
cfg := metric.NewInt64HistogramConfig(options...)
|
||||
const kind = InstrumentKindHistogram
|
||||
p := int64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Int64ObservableCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// increasing int64 measurements once per measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
cfg := metric.NewInt64ObservableCounterConfig(options...)
|
||||
const kind = InstrumentKindObservableCounter
|
||||
p := int64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Int64ObservableUpDownCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// int64 measurements once per measurement collection cycle. Only the
|
||||
// measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindObservableUpDownCounter
|
||||
p := int64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Int64ObservableGauge returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// instantaneous int64 measurements once per measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
cfg := metric.NewInt64ObservableGaugeConfig(options...)
|
||||
const kind = InstrumentKindObservableGauge
|
||||
p := int64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Float64Counter returns a new instrument identified by name and configured
|
||||
// with options. The instrument is used to synchronously record increasing
|
||||
// float64 measurements during a computational operation.
|
||||
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
|
||||
cfg := metric.NewFloat64CounterConfig(options...)
|
||||
const kind = InstrumentKindCounter
|
||||
p := float64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Float64UpDownCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to synchronously record
|
||||
// float64 measurements during a computational operation.
|
||||
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
cfg := metric.NewFloat64UpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindUpDownCounter
|
||||
p := float64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Float64Histogram returns a new instrument identified by name and configured
|
||||
// with options. The instrument is used to synchronously record the
|
||||
// distribution of float64 measurements during a computational operation.
|
||||
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
cfg := metric.NewFloat64HistogramConfig(options...)
|
||||
const kind = InstrumentKindHistogram
|
||||
p := float64InstProvider{m}
|
||||
i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
return i, validateInstrumentName(name)
|
||||
}
|
||||
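For reference, a short sketch of reaching these constructors through the public API; the option helpers are assumed to be the standard metric API ones and the instrument name is illustrative.

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()
	meter := sdkmetric.NewMeterProvider().Meter("example")

	// Create a synchronous histogram instrument and record one measurement.
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithDescription("Duration of handled requests."),
		metric.WithUnit("s"),
	)
	if err != nil {
		panic(err)
	}
	hist.Record(ctx, 0.042)
}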
|
||||
// Float64ObservableCounter returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// increasing float64 measurements once per measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
cfg := metric.NewFloat64ObservableCounterConfig(options...)
|
||||
const kind = InstrumentKindObservableCounter
|
||||
p := float64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Float64ObservableUpDownCounter returns a new instrument identified by name
|
||||
// and configured with options. The instrument is used to asynchronously record
|
||||
// float64 measurements once per measurement collection cycle. Only the
|
||||
// measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
|
||||
const kind = InstrumentKindObservableUpDownCounter
|
||||
p := float64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
// Float64ObservableGauge returns a new instrument identified by name and
|
||||
// configured with options. The instrument is used to asynchronously record
|
||||
// instantaneous float64 measurements once per measurement collection cycle.
|
||||
// Only the measurements recorded during the collection cycle are exported.
|
||||
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
|
||||
const kind = InstrumentKindObservableGauge
|
||||
p := float64ObservProvider{m}
|
||||
inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.registerCallbacks(inst, cfg.Callbacks())
|
||||
return inst, validateInstrumentName(name)
|
||||
}
|
||||
|
||||
func validateInstrumentName(name string) error {
|
||||
if len(name) == 0 {
|
||||
return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name)
|
||||
}
|
||||
if len(name) > 255 {
|
||||
return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name)
|
||||
}
|
||||
if !isAlpha([]rune(name)[0]) {
|
||||
return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name)
|
||||
}
|
||||
if len(name) == 1 {
|
||||
return nil
|
||||
}
|
||||
for _, c := range name[1:] {
|
||||
if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
|
||||
return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func isAlpha(c rune) bool {
|
||||
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
|
||||
}
|
||||
func isAlphanumeric(c rune) bool {
|
||||
return isAlpha(c) || ('0' <= c && c <= '9')
|
||||
}
|
||||
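The naming rules above surface through the constructor error. A small sketch; note that the instrument is still returned alongside the error, so callers may choose to keep using it.

package main

import (
	"errors"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	meter := sdkmetric.NewMeterProvider().Meter("example")

	// Valid: starts with a letter, uses only [A-Za-z0-9_.-/], at most 255 chars.
	if _, err := meter.Int64Counter("http.server/request_count"); err != nil {
		panic(err)
	}

	// Invalid: starts with a digit.
	_, err := meter.Int64Counter("1st_counter")
	fmt.Println(errors.Is(err, sdkmetric.ErrInstrumentName)) // true
}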
|
||||
// RegisterCallback registers f to be called each collection cycle so it will
|
||||
// make observations for insts during those cycles.
|
||||
//
|
||||
// The only instruments f can make observations for are insts. All other
|
||||
// observations will be dropped and an error will be logged.
|
||||
//
|
||||
// Only instruments from this meter can be registered with f; an error is
// returned if other instruments are provided.
|
||||
//
|
||||
// Only observations made in the callback will be exported. Unlike synchronous
|
||||
// instruments, asynchronous callbacks can "forget" attribute sets that are no
|
||||
// longer relevant by omitting the observation during the callback.
|
||||
//
|
||||
// The returned Registration can be used to unregister f.
|
||||
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
|
||||
if len(insts) == 0 {
|
||||
// Don't allocate an observer if not needed.
|
||||
return noopRegister{}, nil
|
||||
}
|
||||
|
||||
reg := newObserver()
|
||||
var errs multierror
|
||||
for _, inst := range insts {
|
||||
// Unwrap any global.
|
||||
if u, ok := inst.(interface {
|
||||
Unwrap() metric.Observable
|
||||
}); ok {
|
||||
inst = u.Unwrap()
|
||||
}
|
||||
|
||||
switch o := inst.(type) {
|
||||
case int64Observable:
|
||||
if err := o.registerable(m); err != nil {
|
||||
if !errors.Is(err, errEmptyAgg) {
|
||||
errs.append(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
reg.registerInt64(o.observablID)
|
||||
case float64Observable:
|
||||
if err := o.registerable(m); err != nil {
|
||||
if !errors.Is(err, errEmptyAgg) {
|
||||
errs.append(err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
reg.registerFloat64(o.observablID)
|
||||
default:
|
||||
// Instrument external to the SDK.
|
||||
return nil, fmt.Errorf("invalid observable: from different implementation")
|
||||
}
|
||||
}
|
||||
|
||||
err := errs.errorOrNil()
|
||||
if reg.len() == 0 {
|
||||
// All insts use drop aggregation or are invalid.
|
||||
return noopRegister{}, err
|
||||
}
|
||||
|
||||
// Some or all instruments were valid.
|
||||
cback := func(ctx context.Context) error { return f(ctx, reg) }
|
||||
return m.pipes.registerMultiCallback(cback), err
|
||||
}
|
||||
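A sketch of registering an asynchronous callback through the public API; the gauge, its name, and the observed value are hypothetical.

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	meter := sdkmetric.NewMeterProvider().Meter("example")

	gauge, err := meter.Float64ObservableGauge("queue.depth")
	if err != nil {
		panic(err)
	}

	// The callback may only observe instruments passed to RegisterCallback;
	// observations for anything else are dropped and logged.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveFloat64(gauge, 42)
		return nil
	}, gauge)
	if err != nil {
		panic(err)
	}
	defer func() { _ = reg.Unregister() }()
}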
|
||||
type observer struct {
|
||||
embedded.Observer
|
||||
|
||||
float64 map[observablID[float64]]struct{}
|
||||
int64 map[observablID[int64]]struct{}
|
||||
}
|
||||
|
||||
func newObserver() observer {
|
||||
return observer{
|
||||
float64: make(map[observablID[float64]]struct{}),
|
||||
int64: make(map[observablID[int64]]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (r observer) len() int {
|
||||
return len(r.float64) + len(r.int64)
|
||||
}
|
||||
|
||||
func (r observer) registerFloat64(id observablID[float64]) {
|
||||
r.float64[id] = struct{}{}
|
||||
}
|
||||
|
||||
func (r observer) registerInt64(id observablID[int64]) {
|
||||
r.int64[id] = struct{}{}
|
||||
}
|
||||
|
||||
var (
|
||||
errUnknownObserver = errors.New("unknown observable instrument")
|
||||
errUnregObserver = errors.New("observable instrument not registered for callback")
|
||||
)
|
||||
|
||||
func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) {
|
||||
var oImpl float64Observable
|
||||
switch conv := o.(type) {
|
||||
case float64Observable:
|
||||
oImpl = conv
|
||||
case interface {
|
||||
Unwrap() metric.Observable
|
||||
}:
|
||||
// Unwrap any global.
|
||||
async := conv.Unwrap()
|
||||
var ok bool
|
||||
if oImpl, ok = async.(float64Observable); !ok {
|
||||
global.Error(errUnknownObserver, "failed to record asynchronous")
|
||||
return
|
||||
}
|
||||
default:
|
||||
global.Error(errUnknownObserver, "failed to record")
|
||||
return
|
||||
}
|
||||
|
||||
if _, registered := r.float64[oImpl.observablID]; !registered {
|
||||
global.Error(errUnregObserver, "failed to record",
|
||||
"name", oImpl.name,
|
||||
"description", oImpl.description,
|
||||
"unit", oImpl.unit,
|
||||
"number", fmt.Sprintf("%T", float64(0)),
|
||||
)
|
||||
return
|
||||
}
|
||||
c := metric.NewObserveConfig(opts)
|
||||
oImpl.observe(v, c.Attributes())
|
||||
}
|
||||
|
||||
func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) {
|
||||
var oImpl int64Observable
|
||||
switch conv := o.(type) {
|
||||
case int64Observable:
|
||||
oImpl = conv
|
||||
case interface {
|
||||
Unwrap() metric.Observable
|
||||
}:
|
||||
// Unwrap any global.
|
||||
async := conv.Unwrap()
|
||||
var ok bool
|
||||
if oImpl, ok = async.(int64Observable); !ok {
|
||||
global.Error(errUnknownObserver, "failed to record asynchronous")
|
||||
return
|
||||
}
|
||||
default:
|
||||
global.Error(errUnknownObserver, "failed to record")
|
||||
return
|
||||
}
|
||||
|
||||
if _, registered := r.int64[oImpl.observablID]; !registered {
|
||||
global.Error(errUnregObserver, "failed to record",
|
||||
"name", oImpl.name,
|
||||
"description", oImpl.description,
|
||||
"unit", oImpl.unit,
|
||||
"number", fmt.Sprintf("%T", int64(0)),
|
||||
)
|
||||
return
|
||||
}
|
||||
c := metric.NewObserveConfig(opts)
|
||||
oImpl.observe(v, c.Attributes())
|
||||
}
|
||||
|
||||
type noopRegister struct{ embedded.Registration }
|
||||
|
||||
func (noopRegister) Unregister() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// int64InstProvider provides int64 OpenTelemetry instruments.
|
||||
type int64InstProvider struct{ *meter }
|
||||
|
||||
func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) {
|
||||
inst := Instrument{
|
||||
Name: name,
|
||||
Description: desc,
|
||||
Unit: u,
|
||||
Kind: kind,
|
||||
Scope: p.scope,
|
||||
}
|
||||
return p.int64Resolver.Aggregators(inst)
|
||||
}
|
||||
|
||||
// lookup returns the resolved instrumentImpl.
|
||||
func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
|
||||
aggs, err := p.aggs(kind, name, desc, u)
|
||||
return &int64Inst{measures: aggs}, err
|
||||
}
|
||||
|
||||
// float64InstProvider provides float64 OpenTelemetry instruments.
|
||||
type float64InstProvider struct{ *meter }
|
||||
|
||||
func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) {
|
||||
inst := Instrument{
|
||||
Name: name,
|
||||
Description: desc,
|
||||
Unit: u,
|
||||
Kind: kind,
|
||||
Scope: p.scope,
|
||||
}
|
||||
return p.float64Resolver.Aggregators(inst)
|
||||
}
|
||||
|
||||
// lookup returns the resolved instrumentImpl.
|
||||
func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
|
||||
aggs, err := p.aggs(kind, name, desc, u)
|
||||
return &float64Inst{measures: aggs}, err
|
||||
}
|
||||
|
||||
type int64ObservProvider struct{ *meter }
|
||||
|
||||
func (p int64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (int64Observable, error) {
|
||||
aggs, err := (int64InstProvider)(p).aggs(kind, name, desc, u)
|
||||
return newInt64Observable(p.meter, kind, name, desc, u, aggs), err
|
||||
}
|
||||
|
||||
func (p int64ObservProvider) registerCallbacks(inst int64Observable, cBacks []metric.Int64Callback) {
|
||||
if inst.observable == nil || len(inst.measures) == 0 {
|
||||
// Drop aggregator.
|
||||
return
|
||||
}
|
||||
|
||||
for _, cBack := range cBacks {
|
||||
p.pipes.registerCallback(p.callback(inst, cBack))
|
||||
}
|
||||
}
|
||||
|
||||
func (p int64ObservProvider) callback(i int64Observable, f metric.Int64Callback) func(context.Context) error {
|
||||
inst := int64Observer{int64Observable: i}
|
||||
return func(ctx context.Context) error { return f(ctx, inst) }
|
||||
}
|
||||
|
||||
type int64Observer struct {
|
||||
embedded.Int64Observer
|
||||
int64Observable
|
||||
}
|
||||
|
||||
func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) {
|
||||
c := metric.NewObserveConfig(opts)
|
||||
o.observe(val, c.Attributes())
|
||||
}
|
||||
|
||||
type float64ObservProvider struct{ *meter }
|
||||
|
||||
func (p float64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (float64Observable, error) {
|
||||
aggs, err := (float64InstProvider)(p).aggs(kind, name, desc, u)
|
||||
return newFloat64Observable(p.meter, kind, name, desc, u, aggs), err
|
||||
}
|
||||
|
||||
func (p float64ObservProvider) registerCallbacks(inst float64Observable, cBacks []metric.Float64Callback) {
|
||||
if inst.observable == nil || len(inst.measures) == 0 {
|
||||
// Drop aggregator.
|
||||
return
|
||||
}
|
||||
|
||||
for _, cBack := range cBacks {
|
||||
p.pipes.registerCallback(p.callback(inst, cBack))
|
||||
}
|
||||
}
|
||||
|
||||
func (p float64ObservProvider) callback(i float64Observable, f metric.Float64Callback) func(context.Context) error {
|
||||
inst := float64Observer{float64Observable: i}
|
||||
return func(ctx context.Context) error { return f(ctx, inst) }
|
||||
}
|
||||
|
||||
type float64Observer struct {
|
||||
embedded.Float64Observer
|
||||
float64Observable
|
||||
}
|
||||
|
||||
func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) {
|
||||
c := metric.NewObserveConfig(opts)
|
||||
o.observe(val, c.Attributes())
|
||||
}
|
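As a usage sketch (not part of the vendored file), the observer plumbing above is reached through the public metric API: a callback registered on a Meter receives a metric.Observer whose ObserveFloat64/ObserveInt64 are the methods defined here. The instrument name and helper below are hypothetical.

package example

import (
	"context"

	"go.opentelemetry.io/otel/metric"
)

// registerQueueDepthGauge registers a hypothetical observable gauge whose
// value is reported through the SDK observer shown above.
func registerQueueDepthGauge(meter metric.Meter, read func() float64) (metric.Registration, error) {
	gauge, err := meter.Float64ObservableGauge("queue.depth") // hypothetical instrument name
	if err != nil {
		return nil, err
	}
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveFloat64(gauge, read()) // dispatches to observer.ObserveFloat64 above
		return nil
	}, gauge)
}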
242
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/data.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
|
||||
// that created them.
|
||||
type ResourceMetrics struct {
|
||||
// Resource represents the entity that collected the metrics.
|
||||
Resource *resource.Resource
|
||||
// ScopeMetrics are the collection of metrics with unique Scopes.
|
||||
ScopeMetrics []ScopeMetrics
|
||||
}
|
||||
|
||||
// ScopeMetrics is a collection of Metrics produced by a Meter.
|
||||
type ScopeMetrics struct {
|
||||
// Scope is the Scope that the Meter was created with.
|
||||
Scope instrumentation.Scope
|
||||
// Metrics are a list of aggregations created by the Meter.
|
||||
Metrics []Metrics
|
||||
}
|
||||
|
||||
// Metrics is a collection of one or more aggregated timeseries from an Instrument.
|
||||
type Metrics struct {
|
||||
// Name is the name of the Instrument that created this data.
|
||||
Name string
|
||||
// Description is the description of the Instrument, which can be used in documentation.
|
||||
Description string
|
||||
// Unit is the unit in which the Instrument reports.
|
||||
Unit string
|
||||
// Data is the aggregated data from an Instrument.
|
||||
Data Aggregation
|
||||
}
|
||||
|
||||
// Aggregation is the store of data reported by an Instrument.
|
||||
// It will be one of: Gauge, Sum, Histogram, ExponentialHistogram.
|
||||
type Aggregation interface {
|
||||
privateAggregation()
|
||||
}
|
||||
|
||||
// Gauge represents a measurement of the current value of an instrument.
|
||||
type Gauge[N int64 | float64] struct {
|
||||
// DataPoints are the individual aggregated measurements with unique
|
||||
// Attributes.
|
||||
DataPoints []DataPoint[N]
|
||||
}
|
||||
|
||||
func (Gauge[N]) privateAggregation() {}
|
||||
|
||||
// Sum represents the sum of all measurements of values from an instrument.
|
||||
type Sum[N int64 | float64] struct {
|
||||
// DataPoints are the individual aggregated measurements with unique
|
||||
// Attributes.
|
||||
DataPoints []DataPoint[N]
|
||||
// Temporality describes if the aggregation is reported as the change from the
|
||||
// last report time, or the cumulative changes since a fixed start time.
|
||||
Temporality Temporality
|
||||
// IsMonotonic represents if this aggregation only increases or decreases.
|
||||
IsMonotonic bool
|
||||
}
|
||||
|
||||
func (Sum[N]) privateAggregation() {}
|
||||
|
||||
// DataPoint is a single data point in a timeseries.
|
||||
type DataPoint[N int64 | float64] struct {
|
||||
// Attributes is the set of key value pairs that uniquely identify the
|
||||
// timeseries.
|
||||
Attributes attribute.Set
|
||||
// StartTime is when the timeseries was started. (optional)
|
||||
StartTime time.Time `json:",omitempty"`
|
||||
// Time is the time when the timeseries was recorded. (optional)
|
||||
Time time.Time `json:",omitempty"`
|
||||
// Value is the value of this data point.
|
||||
Value N
|
||||
|
||||
// Exemplars is the sampled Exemplars collected during the timeseries.
|
||||
Exemplars []Exemplar[N] `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Histogram represents the histogram of all measurements of values from an instrument.
|
||||
type Histogram[N int64 | float64] struct {
|
||||
// DataPoints are the individual aggregated measurements with unique
|
||||
// Attributes.
|
||||
DataPoints []HistogramDataPoint[N]
|
||||
// Temporality describes if the aggregation is reported as the change from the
|
||||
// last report time, or the cumulative changes since a fixed start time.
|
||||
Temporality Temporality
|
||||
}
|
||||
|
||||
func (Histogram[N]) privateAggregation() {}
|
||||
|
||||
// HistogramDataPoint is a single histogram data point in a timeseries.
|
||||
type HistogramDataPoint[N int64 | float64] struct {
|
||||
// Attributes is the set of key value pairs that uniquely identify the
|
||||
// timeseries.
|
||||
Attributes attribute.Set
|
||||
// StartTime is when the timeseries was started.
|
||||
StartTime time.Time
|
||||
// Time is the time when the timeseries was recorded.
|
||||
Time time.Time
|
||||
|
||||
// Count is the number of updates this histogram has been calculated with.
|
||||
Count uint64
|
||||
// Bounds are the upper bounds of the buckets of the histogram. Because the
|
||||
// last boundary is +infinity this one is implied.
|
||||
Bounds []float64
|
||||
// BucketCounts is the count of each of the buckets.
|
||||
BucketCounts []uint64
|
||||
|
||||
// Min is the minimum value recorded. (optional)
|
||||
Min Extrema[N]
|
||||
// Max is the maximum value recorded. (optional)
|
||||
Max Extrema[N]
|
||||
// Sum is the sum of the values recorded.
|
||||
Sum N
|
||||
|
||||
// Exemplars is the sampled Exemplars collected during the timeseries.
|
||||
Exemplars []Exemplar[N] `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExponentialHistogram represents the histogram of all measurements of values from an instrument.
|
||||
type ExponentialHistogram[N int64 | float64] struct {
|
||||
// DataPoints are the individual aggregated measurements with unique
|
||||
// attributes.
|
||||
DataPoints []ExponentialHistogramDataPoint[N]
|
||||
// Temporality describes if the aggregation is reported as the change from the
|
||||
// last report time, or the cumulative changes since a fixed start time.
|
||||
Temporality Temporality
|
||||
}
|
||||
|
||||
func (ExponentialHistogram[N]) privateAggregation() {}
|
||||
|
||||
// ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries.
|
||||
type ExponentialHistogramDataPoint[N int64 | float64] struct {
|
||||
// Attributes is the set of key value pairs that uniquely identify the
|
||||
// timeseries.
|
||||
Attributes attribute.Set
|
||||
// StartTime is when the timeseries was started.
|
||||
StartTime time.Time
|
||||
// Time is the time when the timeseries was recorded.
|
||||
Time time.Time
|
||||
|
||||
// Count is the number of updates this histogram has been calculated with.
|
||||
Count uint64
|
||||
// Min is the minimum value recorded. (optional)
|
||||
Min Extrema[N]
|
||||
// Max is the maximum value recorded. (optional)
|
||||
Max Extrema[N]
|
||||
// Sum is the sum of the values recorded.
|
||||
Sum N
|
||||
|
||||
// Scale describes the resolution of the histogram. Boundaries are
|
||||
// located at powers of the base, where:
|
||||
//
|
||||
// base = 2 ^ (2 ^ -Scale)
|
||||
Scale int32
|
||||
// ZeroCount is the number of values whose absolute value
|
||||
// is less than or equal to [ZeroThreshold].
|
||||
// When ZeroThreshold is 0, this is the number of values that
|
||||
// cannot be expressed using the standard exponential formula
|
||||
// as well as values that have been rounded to zero.
|
||||
// ZeroCount represents the special zero count bucket.
|
||||
ZeroCount uint64
|
||||
|
||||
// PositiveBucket is the range of positive value bucket counts.
|
||||
PositiveBucket ExponentialBucket
|
||||
// NegativeBucket is the range of negative value bucket counts.
|
||||
NegativeBucket ExponentialBucket
|
||||
|
||||
// ZeroThreshold is the width of the zero region. Where the zero region is
|
||||
// defined as the closed interval [-ZeroThreshold, ZeroThreshold].
|
||||
ZeroThreshold float64
|
||||
|
||||
// Exemplars is the sampled Exemplars collected during the timeseries.
|
||||
Exemplars []Exemplar[N] `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ExponentialBucket are a set of bucket counts, encoded in a contiguous array
|
||||
// of counts.
|
||||
type ExponentialBucket struct {
|
||||
// Offset is the bucket index of the first entry in the Counts slice.
|
||||
Offset int32
|
||||
// Counts is a slice where Counts[i] carries the count of the bucket at
|
||||
// index (Offset+i). Counts[i] is the count of values greater than
|
||||
// base^(Offset+i) and less than or equal to base^(Offset+i+1).
|
||||
Counts []uint64
|
||||
}
|
||||
|
||||
// Extrema is the minimum or maximum value of a dataset.
|
||||
type Extrema[N int64 | float64] struct {
|
||||
value N
|
||||
valid bool
|
||||
}
|
||||
|
||||
// NewExtrema returns an Extrema set to v.
|
||||
func NewExtrema[N int64 | float64](v N) Extrema[N] {
|
||||
return Extrema[N]{value: v, valid: true}
|
||||
}
|
||||
|
||||
// Value returns the Extrema value and true if the Extrema is defined.
|
||||
// Otherwise, if the Extrema is its zero-value, defined will be false.
|
||||
func (e Extrema[N]) Value() (v N, defined bool) {
|
||||
return e.value, e.valid
|
||||
}
|
||||
|
||||
// Exemplar is a measurement sampled from a timeseries providing a typical
|
||||
// example.
|
||||
type Exemplar[N int64 | float64] struct {
|
||||
// FilteredAttributes are the attributes recorded with the measurement but
|
||||
// filtered out of the timeseries' aggregated data.
|
||||
FilteredAttributes []attribute.KeyValue
|
||||
// Time is the time when the measurement was recorded.
|
||||
Time time.Time
|
||||
// Value is the measured value.
|
||||
Value N
|
||||
// SpanID is the ID of the span that was active during the measurement. If
|
||||
// no span was active or the span was not sampled this will be empty.
|
||||
SpanID []byte `json:",omitempty"`
|
||||
// TraceID is the ID of the trace the active span belonged to during the
|
||||
// measurement. If no span was active or the span was not sampled this will
|
||||
// be empty.
|
||||
TraceID []byte `json:",omitempty"`
|
||||
}
|
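For orientation, a minimal sketch (not part of the vendored file) of how an exporter or test might walk the metricdata structures defined above; the helper name is hypothetical.

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// printMetrics ranges over ScopeMetrics and type-switches on the Aggregation
// variants (Gauge, Sum, Histogram, ExponentialHistogram) defined above.
func printMetrics(rm *metricdata.ResourceMetrics) {
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			switch data := m.Data.(type) {
			case metricdata.Sum[int64]:
				for _, dp := range data.DataPoints {
					fmt.Printf("%s (%s): %d\n", m.Name, m.Unit, dp.Value)
				}
			case metricdata.Histogram[float64]:
				for _, dp := range data.DataPoints {
					if min, ok := dp.Min.Value(); ok {
						fmt.Printf("%s: count=%d min=%g\n", m.Name, dp.Count, min)
					}
				}
			default:
				fmt.Printf("%s: unhandled aggregation %T\n", m.Name, data)
			}
		}
	}
}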
41
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate stringer -type=Temporality
|
||||
|
||||
package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
|
||||
// Temporality defines the window that an aggregation was calculated over.
|
||||
type Temporality uint8
|
||||
|
||||
const (
|
||||
// undefinedTemporality represents an unset Temporality.
|
||||
//nolint:deadcode,unused,varcheck
|
||||
undefinedTemporality Temporality = iota
|
||||
|
||||
// CumulativeTemporality defines a measurement interval that continues to
|
||||
// expand forward in time from a starting point. New measurements are
|
||||
// added to all previous measurements since a start time.
|
||||
CumulativeTemporality
|
||||
|
||||
// DeltaTemporality defines a measurement interval that resets each cycle.
|
||||
// Measurements from one cycle are recorded independently, measurements
|
||||
// from other cycles do not affect them.
|
||||
DeltaTemporality
|
||||
)
|
||||
|
||||
// MarshalText returns the byte encoding of t.
|
||||
func (t Temporality) MarshalText() ([]byte, error) {
|
||||
return []byte(t.String()), nil
|
||||
}
|
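As an illustration (not part of the vendored file), these constants are typically consumed through a temporality selector; the function below is a hypothetical sketch preferring delta temporality for counters.

package example

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// deltaForCounters reports counters with DeltaTemporality and everything
// else with CumulativeTemporality.
func deltaForCounters(kind sdkmetric.InstrumentKind) metricdata.Temporality {
	switch kind {
	case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindObservableCounter:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}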
25
vendor/go.opentelemetry.io/otel/sdk/metric/metricdata/temporality_string.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
// Code generated by "stringer -type=Temporality"; DO NOT EDIT.
|
||||
|
||||
package metricdata
|
||||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[undefinedTemporality-0]
|
||||
_ = x[CumulativeTemporality-1]
|
||||
_ = x[DeltaTemporality-2]
|
||||
}
|
||||
|
||||
const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality"
|
||||
|
||||
var _Temporality_index = [...]uint8{0, 20, 41, 57}
|
||||
|
||||
func (i Temporality) String() string {
|
||||
if i >= Temporality(len(_Temporality_index)-1) {
|
||||
return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")"
|
||||
}
|
||||
return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]]
|
||||
}
|
380
vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go
generated
vendored
Normal file
@@ -0,0 +1,380 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// Default periodic reader timing.
|
||||
const (
|
||||
defaultTimeout = time.Millisecond * 30000
|
||||
defaultInterval = time.Millisecond * 60000
|
||||
)
|
||||
|
||||
// periodicReaderConfig contains configuration options for a PeriodicReader.
|
||||
type periodicReaderConfig struct {
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
producers []Producer
|
||||
}
|
||||
|
||||
// newPeriodicReaderConfig returns a periodicReaderConfig configured with
|
||||
// options.
|
||||
func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig {
|
||||
c := periodicReaderConfig{
|
||||
interval: envDuration(envInterval, defaultInterval),
|
||||
timeout: envDuration(envTimeout, defaultTimeout),
|
||||
}
|
||||
for _, o := range options {
|
||||
c = o.applyPeriodic(c)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// PeriodicReaderOption applies a configuration option value to a PeriodicReader.
|
||||
type PeriodicReaderOption interface {
|
||||
applyPeriodic(periodicReaderConfig) periodicReaderConfig
|
||||
}
|
||||
|
||||
// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig.
|
||||
type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig
|
||||
|
||||
// applyPeriodic returns a periodicReaderConfig with option(s) applied.
|
||||
func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig {
|
||||
return o(conf)
|
||||
}
|
||||
|
||||
// WithTimeout configures the time a PeriodicReader waits for an export to
|
||||
// complete before canceling it. This includes an export which occurs as part
|
||||
// of Shutdown or ForceFlush if the user passed context does not have a
|
||||
// deadline. If the user passed context does have a deadline, it will be used
|
||||
// instead.
|
||||
//
|
||||
// This option overrides any value set for the
|
||||
// OTEL_METRIC_EXPORT_TIMEOUT environment variable.
|
||||
//
|
||||
// If this option is not used or d is less than or equal to zero, 30 seconds
|
||||
// is used as the default.
|
||||
func WithTimeout(d time.Duration) PeriodicReaderOption {
|
||||
return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
|
||||
if d <= 0 {
|
||||
return conf
|
||||
}
|
||||
conf.timeout = d
|
||||
return conf
|
||||
})
|
||||
}
|
||||
|
||||
// WithInterval configures the intervening time between exports for a
|
||||
// PeriodicReader.
|
||||
//
|
||||
// This option overrides any value set for the
|
||||
// OTEL_METRIC_EXPORT_INTERVAL environment variable.
|
||||
//
|
||||
// If this option is not used or d is less than or equal to zero, 60 seconds
|
||||
// is used as the default.
|
||||
func WithInterval(d time.Duration) PeriodicReaderOption {
|
||||
return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig {
|
||||
if d <= 0 {
|
||||
return conf
|
||||
}
|
||||
conf.interval = d
|
||||
return conf
|
||||
})
|
||||
}
|
||||
|
||||
// NewPeriodicReader returns a Reader that collects and exports metric data to
|
||||
// the exporter at a defined interval. By default, the returned Reader will
|
||||
// collect and export data every 60 seconds, and will cancel any attempts that
|
||||
// exceed 30 seconds, collect and export combined. The collect and export time
|
||||
// are not counted towards the interval between attempts.
|
||||
//
|
||||
// The Collect method of the returned Reader continues to gather and return
|
||||
// metric data to the user. It will not automatically send that data to the
|
||||
// exporter. That is left to the user to accomplish.
|
||||
func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader {
|
||||
conf := newPeriodicReaderConfig(options)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
r := &PeriodicReader{
|
||||
interval: conf.interval,
|
||||
timeout: conf.timeout,
|
||||
exporter: exporter,
|
||||
flushCh: make(chan chan error),
|
||||
cancel: cancel,
|
||||
done: make(chan struct{}),
|
||||
rmPool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &metricdata.ResourceMetrics{}
|
||||
}},
|
||||
}
|
||||
r.externalProducers.Store(conf.producers)
|
||||
|
||||
go func() {
|
||||
defer func() { close(r.done) }()
|
||||
r.run(ctx, conf.interval)
|
||||
}()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// PeriodicReader is a Reader that continuously collects and exports metric
|
||||
// data at a set interval.
|
||||
type PeriodicReader struct {
|
||||
sdkProducer atomic.Value
|
||||
|
||||
mu sync.Mutex
|
||||
isShutdown bool
|
||||
externalProducers atomic.Value
|
||||
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
exporter Exporter
|
||||
flushCh chan chan error
|
||||
|
||||
done chan struct{}
|
||||
cancel context.CancelFunc
|
||||
shutdownOnce sync.Once
|
||||
|
||||
rmPool sync.Pool
|
||||
}
|
||||
|
||||
// Compile-time check that PeriodicReader implements Reader and is comparable.
|
||||
var _ = map[Reader]struct{}{&PeriodicReader{}: {}}
|
||||
|
||||
// newTicker allows testing override.
|
||||
var newTicker = time.NewTicker
|
||||
|
||||
// run continuously collects and exports metric data at the specified
|
||||
// interval. This will run until ctx is canceled or times out.
|
||||
func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) {
|
||||
ticker := newTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err := r.collectAndExport(ctx)
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
case errCh := <-r.flushCh:
|
||||
errCh <- r.collectAndExport(ctx)
|
||||
ticker.Reset(interval)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// register registers p as the producer of this reader.
|
||||
func (r *PeriodicReader) register(p sdkProducer) {
|
||||
// Only register once. If producer is already set, do nothing.
|
||||
if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
|
||||
msg := "did not register periodic reader"
|
||||
global.Error(errDuplicateRegister, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality {
|
||||
return r.exporter.Temporality(kind)
|
||||
}
|
||||
|
||||
// aggregation returns what Aggregation to use for kind.
|
||||
func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type.
|
||||
return r.exporter.Aggregation(kind)
|
||||
}
|
||||
|
||||
// collectAndExport gather all metric data related to the periodicReader r from
|
||||
// the SDK and exports it with r's exporter.
|
||||
func (r *PeriodicReader) collectAndExport(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, r.timeout)
|
||||
defer cancel()
|
||||
|
||||
// TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect.
|
||||
rm := r.rmPool.Get().(*metricdata.ResourceMetrics)
|
||||
err := r.Collect(ctx, rm)
|
||||
if err == nil {
|
||||
err = r.export(ctx, rm)
|
||||
}
|
||||
r.rmPool.Put(rm)
|
||||
return err
|
||||
}
|
||||
|
||||
// Collect gathers all metric data related to the Reader from
|
||||
// the SDK and other Producers and stores the result in rm. The metric
|
||||
// data is not exported to the configured exporter; it is left to the caller to
|
||||
// handle that if desired.
|
||||
//
|
||||
// Collect will return an error if called after shutdown.
|
||||
// Collect will return an error if rm is a nil ResourceMetrics.
|
||||
// Collect will return an error if the context's Done channel is closed.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||
if rm == nil {
|
||||
return errors.New("periodic reader: *metricdata.ResourceMetrics is nil")
|
||||
}
|
||||
// TODO (#3047): When collect is updated to accept output as param, pass rm.
|
||||
return r.collect(ctx, r.sdkProducer.Load(), rm)
|
||||
}
|
||||
|
||||
// collect unwraps p as a produceHolder and returns its produce results.
|
||||
func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error {
|
||||
if p == nil {
|
||||
return ErrReaderNotRegistered
|
||||
}
|
||||
|
||||
ph, ok := p.(produceHolder)
|
||||
if !ok {
|
||||
// The atomic.Value is entirely in the periodicReader's control so
|
||||
// this should never happen. In the unforeseen case that this does
|
||||
// happen, return an error instead of panicking so a user's code does
|
||||
// not halt in the process.
|
||||
err := fmt.Errorf("periodic reader: invalid producer: %T", p)
|
||||
return err
|
||||
}
|
||||
|
||||
err := ph.produce(ctx, rm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var errs []error
|
||||
for _, producer := range r.externalProducers.Load().([]Producer) {
|
||||
externalMetrics, err := producer.Produce(ctx)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...)
|
||||
}
|
||||
|
||||
global.Debug("PeriodicReader collection", "Data", rm)
|
||||
|
||||
return unifyErrors(errs)
|
||||
}
|
||||
|
||||
// export exports metric data m using r's exporter.
|
||||
func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error {
|
||||
return r.exporter.Export(ctx, m)
|
||||
}
|
||||
|
||||
// ForceFlush flushes pending telemetry.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (r *PeriodicReader) ForceFlush(ctx context.Context) error {
|
||||
// Prioritize the ctx timeout if it is set.
|
||||
if _, ok := ctx.Deadline(); !ok {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, r.timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
errCh := make(chan error, 1)
|
||||
select {
|
||||
case r.flushCh <- errCh:
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
close(errCh)
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
case <-r.done:
|
||||
return ErrReaderShutdown
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
return r.exporter.ForceFlush(ctx)
|
||||
}
|
||||
|
||||
// Shutdown flushes pending telemetry and then stops the export pipeline.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (r *PeriodicReader) Shutdown(ctx context.Context) error {
|
||||
err := ErrReaderShutdown
|
||||
r.shutdownOnce.Do(func() {
|
||||
// Prioritize the ctx timeout if it is set.
|
||||
if _, ok := ctx.Deadline(); !ok {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, r.timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Stop the run loop.
|
||||
r.cancel()
|
||||
<-r.done
|
||||
|
||||
// Any future call to Collect will now return ErrReaderShutdown.
|
||||
ph := r.sdkProducer.Swap(produceHolder{
|
||||
produce: shutdownProducer{}.produce,
|
||||
})
|
||||
|
||||
if ph != nil { // Reader was registered.
|
||||
// Flush pending telemetry.
|
||||
m := r.rmPool.Get().(*metricdata.ResourceMetrics)
|
||||
err = r.collect(ctx, ph, m)
|
||||
if err == nil {
|
||||
err = r.export(ctx, m)
|
||||
}
|
||||
r.rmPool.Put(m)
|
||||
}
|
||||
|
||||
sErr := r.exporter.Shutdown(ctx)
|
||||
if err == nil || err == ErrReaderShutdown {
|
||||
err = sErr
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
r.isShutdown = true
|
||||
// release references to Producer(s)
|
||||
r.externalProducers.Store([]Producer{})
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalLog returns logging data about the PeriodicReader.
|
||||
func (r *PeriodicReader) MarshalLog() interface{} {
|
||||
r.mu.Lock()
|
||||
down := r.isShutdown
|
||||
r.mu.Unlock()
|
||||
return struct {
|
||||
Type string
|
||||
Exporter Exporter
|
||||
Registered bool
|
||||
Shutdown bool
|
||||
Interval time.Duration
|
||||
Timeout time.Duration
|
||||
}{
|
||||
Type: "PeriodicReader",
|
||||
Exporter: r.exporter,
|
||||
Registered: r.sdkProducer.Load() != nil,
|
||||
Shutdown: down,
|
||||
Interval: r.interval,
|
||||
Timeout: r.timeout,
|
||||
}
|
||||
}
|
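For context, a minimal wiring sketch (not part of the vendored file) of the options documented above; stdoutmetric stands in for any Exporter implementation and the intervals are arbitrary.

package example

import (
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// newProvider builds a MeterProvider whose PeriodicReader collects and
// exports every 15s, canceling any attempt that exceeds 10s.
func newProvider() (*sdkmetric.MeterProvider, error) {
	exp, err := stdoutmetric.New()
	if err != nil {
		return nil, err
	}
	reader := sdkmetric.NewPeriodicReader(exp,
		sdkmetric.WithInterval(15*time.Second), // default is 60s
		sdkmetric.WithTimeout(10*time.Second),  // default is 30s
	)
	return sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)), nil
}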
625
vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go
generated
vendored
Normal file
@@ -0,0 +1,625 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal"
|
||||
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
var (
|
||||
errCreatingAggregators = errors.New("could not create all aggregators")
|
||||
errIncompatibleAggregation = errors.New("incompatible aggregation")
|
||||
errUnknownAggregation = errors.New("unrecognized aggregation")
|
||||
)
|
||||
|
||||
// instrumentSync is a synchronization point between a pipeline and an
|
||||
// instrument's aggregate function.
|
||||
type instrumentSync struct {
|
||||
name string
|
||||
description string
|
||||
unit string
|
||||
compAgg aggregate.ComputeAggregation
|
||||
}
|
||||
|
||||
func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline {
|
||||
if res == nil {
|
||||
res = resource.Empty()
|
||||
}
|
||||
return &pipeline{
|
||||
resource: res,
|
||||
reader: reader,
|
||||
views: views,
|
||||
// aggregations is lazy allocated when needed.
|
||||
}
|
||||
}
|
||||
|
||||
// pipeline connects all of the instruments created by a meter provider to a Reader.
|
||||
// This is the object that will be registered via `Reader.register()` when a meter provider is created.
|
||||
//
|
||||
// As instruments are created the instrument should be checked if it exists in
|
||||
// the views of the Reader, and if so each aggregate function should be added
|
||||
// to the pipeline.
|
||||
type pipeline struct {
|
||||
resource *resource.Resource
|
||||
|
||||
reader Reader
|
||||
views []View
|
||||
|
||||
sync.Mutex
|
||||
aggregations map[instrumentation.Scope][]instrumentSync
|
||||
callbacks []func(context.Context) error
|
||||
multiCallbacks list.List
|
||||
}
|
||||
|
||||
// addSync adds the instrumentSync to pipeline p with scope. This method is not
|
||||
// idempotent. Duplicate calls will result in duplicate additions; it is the
|
||||
// caller's responsibility to ensure this is called with unique values.
|
||||
func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if p.aggregations == nil {
|
||||
p.aggregations = map[instrumentation.Scope][]instrumentSync{
|
||||
scope: {iSync},
|
||||
}
|
||||
return
|
||||
}
|
||||
p.aggregations[scope] = append(p.aggregations[scope], iSync)
|
||||
}
|
||||
|
||||
// addCallback registers a single instrument callback to be run when
|
||||
// `produce()` is called.
|
||||
func (p *pipeline) addCallback(cback func(context.Context) error) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.callbacks = append(p.callbacks, cback)
|
||||
}
|
||||
|
||||
type multiCallback func(context.Context) error
|
||||
|
||||
// addMultiCallback registers a multi-instrument callback to be run when
|
||||
// `produce()` is called.
|
||||
func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
e := p.multiCallbacks.PushBack(c)
|
||||
return func() {
|
||||
p.Lock()
|
||||
p.multiCallbacks.Remove(e)
|
||||
p.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// produce returns aggregated metrics from a single collection.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
var errs multierror
|
||||
for _, c := range p.callbacks {
|
||||
// TODO make the callbacks parallel. ( #3034 )
|
||||
if err := c(ctx); err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
rm.Resource = nil
|
||||
rm.ScopeMetrics = rm.ScopeMetrics[:0]
|
||||
return err
|
||||
}
|
||||
}
|
||||
for e := p.multiCallbacks.Front(); e != nil; e = e.Next() {
|
||||
// TODO make the callbacks parallel. ( #3034 )
|
||||
f := e.Value.(multiCallback)
|
||||
if err := f(ctx); err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
// This means the context expired before we finished running callbacks.
|
||||
rm.Resource = nil
|
||||
rm.ScopeMetrics = rm.ScopeMetrics[:0]
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rm.Resource = p.resource
|
||||
rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations))
|
||||
|
||||
i := 0
|
||||
for scope, instruments := range p.aggregations {
|
||||
rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments))
|
||||
j := 0
|
||||
for _, inst := range instruments {
|
||||
data := rm.ScopeMetrics[i].Metrics[j].Data
|
||||
if n := inst.compAgg(&data); n > 0 {
|
||||
rm.ScopeMetrics[i].Metrics[j].Name = inst.name
|
||||
rm.ScopeMetrics[i].Metrics[j].Description = inst.description
|
||||
rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit
|
||||
rm.ScopeMetrics[i].Metrics[j].Data = data
|
||||
j++
|
||||
}
|
||||
}
|
||||
rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j]
|
||||
if len(rm.ScopeMetrics[i].Metrics) > 0 {
|
||||
rm.ScopeMetrics[i].Scope = scope
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
rm.ScopeMetrics = rm.ScopeMetrics[:i]
|
||||
|
||||
return errs.errorOrNil()
|
||||
}
|
||||
|
||||
// inserter facilitates inserting of new instruments from a single scope into a
|
||||
// pipeline.
|
||||
type inserter[N int64 | float64] struct {
|
||||
// aggregators is a cache that holds aggregate function inputs whose
|
||||
// outputs have been inserted into the underlying reader pipeline. This
|
||||
// cache ensures no duplicate aggregate functions are inserted into the
|
||||
// reader pipeline and if a new request during an instrument creation asks
|
||||
// for the same aggregate function input the same instance is returned.
|
||||
aggregators *cache[instID, aggVal[N]]
|
||||
|
||||
// views is a cache that holds instrument identifiers for all the
|
||||
// instruments a Meter has created; it is provided by the Meter that owns
|
||||
// this inserter. This cache ensures during the creation of instruments
|
||||
// with the same name but different options (e.g. description, unit) a
|
||||
// warning message is logged.
|
||||
views *cache[string, instID]
|
||||
|
||||
pipeline *pipeline
|
||||
}
|
||||
|
||||
func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] {
|
||||
if vc == nil {
|
||||
vc = &cache[string, instID]{}
|
||||
}
|
||||
return &inserter[N]{
|
||||
aggregators: &cache[instID, aggVal[N]]{},
|
||||
views: vc,
|
||||
pipeline: p,
|
||||
}
|
||||
}
|
||||
|
||||
// Instrument inserts the instrument inst with instUnit into a pipeline. All
|
||||
// views the pipeline contains are matched against, and any matching view that
|
||||
// creates a unique aggregate function will have its output inserted into the
|
||||
// pipeline and its input included in the returned slice.
|
||||
//
|
||||
// The returned aggregate function inputs are ensured to be deduplicated and
|
||||
// unique. If another view in another pipeline that is cached by this
|
||||
// inserter's cache has already inserted the same aggregate function for the
|
||||
// same instrument, that function's input instance is returned.
|
||||
//
|
||||
// If another instrument has already been inserted by this inserter, or any
|
||||
// other using the same cache, and it conflicts with the instrument being
|
||||
// inserted in this call, an aggregate function input matching the arguments
|
||||
// will still be returned but an Info level log message will also be logged to
|
||||
// the OTel global logger.
|
||||
//
|
||||
// If the passed instrument would result in an incompatible aggregate function,
|
||||
// an error is returned and that aggregate function output is not inserted nor
|
||||
// is its input returned.
|
||||
//
|
||||
// If an instrument is determined to use a Drop aggregation, that instrument is
|
||||
// not inserted nor returned.
|
||||
func (i *inserter[N]) Instrument(inst Instrument) ([]aggregate.Measure[N], error) {
|
||||
var (
|
||||
matched bool
|
||||
measures []aggregate.Measure[N]
|
||||
)
|
||||
|
||||
errs := &multierror{wrapped: errCreatingAggregators}
|
||||
seen := make(map[uint64]struct{})
|
||||
for _, v := range i.pipeline.views {
|
||||
stream, match := v(inst)
|
||||
if !match {
|
||||
continue
|
||||
}
|
||||
matched = true
|
||||
|
||||
in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if in == nil { // Drop aggregation.
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[id]; ok {
|
||||
// This aggregate function has already been added.
|
||||
continue
|
||||
}
|
||||
seen[id] = struct{}{}
|
||||
measures = append(measures, in)
|
||||
}
|
||||
|
||||
if matched {
|
||||
return measures, errs.errorOrNil()
|
||||
}
|
||||
|
||||
// Apply implicit default view if no explicit matched.
|
||||
stream := Stream{
|
||||
Name: inst.Name,
|
||||
Description: inst.Description,
|
||||
Unit: inst.Unit,
|
||||
}
|
||||
in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
if in != nil {
|
||||
// Ensured to have not seen given matched was false.
|
||||
measures = append(measures, in)
|
||||
}
|
||||
return measures, errs.errorOrNil()
|
||||
}
|
||||
|
||||
var aggIDCount uint64
|
||||
|
||||
// aggVal is the cached value in an aggregators cache.
|
||||
type aggVal[N int64 | float64] struct {
|
||||
ID uint64
|
||||
Measure aggregate.Measure[N]
|
||||
Err error
|
||||
}
|
||||
|
||||
// cachedAggregator returns the appropriate aggregate input and output
|
||||
// functions for an instrument configuration. If the exact instrument has been
|
||||
// created within the inst.Scope, those aggregate function instances will be
|
||||
// returned. Otherwise, new computed aggregate functions will be cached and
|
||||
// returned.
|
||||
//
|
||||
// If the instrument configuration conflicts with an instrument that has
|
||||
// already been created (e.g. description, unit, data type) a warning will be
|
||||
// logged at the "Info" level with the global OTel logger. Valid new aggregate
|
||||
// functions for the instrument configuration will still be returned without an
|
||||
// error.
|
||||
//
|
||||
// If the instrument defines an unknown or incompatible aggregation, an error
|
||||
// is returned.
|
||||
func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream) (meas aggregate.Measure[N], aggID uint64, err error) {
|
||||
switch stream.Aggregation.(type) {
|
||||
case nil:
|
||||
// Undefined, nil, means to use the default from the reader.
|
||||
stream.Aggregation = i.pipeline.reader.aggregation(kind)
|
||||
switch stream.Aggregation.(type) {
|
||||
case nil, AggregationDefault:
|
||||
// If the reader returns default or nil use the default selector.
|
||||
stream.Aggregation = DefaultAggregationSelector(kind)
|
||||
default:
|
||||
// Deep copy and validate before using.
|
||||
stream.Aggregation = stream.Aggregation.copy()
|
||||
if err := stream.Aggregation.err(); err != nil {
|
||||
orig := stream.Aggregation
|
||||
stream.Aggregation = DefaultAggregationSelector(kind)
|
||||
global.Error(
|
||||
err, "using default aggregation instead",
|
||||
"aggregation", orig,
|
||||
"replacement", stream.Aggregation,
|
||||
)
|
||||
}
|
||||
}
|
||||
case AggregationDefault:
|
||||
stream.Aggregation = DefaultAggregationSelector(kind)
|
||||
}
|
||||
|
||||
if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil {
|
||||
return nil, 0, fmt.Errorf(
|
||||
"creating aggregator with instrumentKind: %d, aggregation %v: %w",
|
||||
kind, stream.Aggregation, err,
|
||||
)
|
||||
}
|
||||
|
||||
id := i.instID(kind, stream)
|
||||
// If there is a conflict, the specification says the view should
|
||||
// still be applied and a warning should be logged.
|
||||
i.logConflict(id)
|
||||
|
||||
// If there are requests for the same instrument with different name
|
||||
// casing, the first-seen needs to be returned. Use a normalized ID for the
|
||||
// cache lookup to ensure the correct comparison.
|
||||
normID := id.normalize()
|
||||
cv := i.aggregators.Lookup(normID, func() aggVal[N] {
|
||||
b := aggregate.Builder[N]{
|
||||
Temporality: i.pipeline.reader.temporality(kind),
|
||||
}
|
||||
b.Filter = stream.AttributeFilter
|
||||
in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
|
||||
if err != nil {
|
||||
return aggVal[N]{0, nil, err}
|
||||
}
|
||||
if in == nil { // Drop aggregator.
|
||||
return aggVal[N]{0, nil, nil}
|
||||
}
|
||||
i.pipeline.addSync(scope, instrumentSync{
|
||||
// Use the first-seen name casing for this and all subsequent
|
||||
// requests of this instrument.
|
||||
name: stream.Name,
|
||||
description: stream.Description,
|
||||
unit: stream.Unit,
|
||||
compAgg: out,
|
||||
})
|
||||
id := atomic.AddUint64(&aggIDCount, 1)
|
||||
return aggVal[N]{id, in, err}
|
||||
})
|
||||
return cv.Measure, cv.ID, cv.Err
|
||||
}
|
||||
|
||||
// logConflict validates if an instrument with the same case-insensitive name
|
||||
// as id has already been created. If that instrument conflicts with id, a
|
||||
// warning is logged.
|
||||
func (i *inserter[N]) logConflict(id instID) {
|
||||
// The API specification defines names as case-insensitive. If there is a
|
||||
// different casing of a name it needs to be a conflict.
|
||||
name := id.normalize().Name
|
||||
existing := i.views.Lookup(name, func() instID { return id })
|
||||
if id == existing {
|
||||
return
|
||||
}
|
||||
|
||||
const msg = "duplicate metric stream definitions"
|
||||
args := []interface{}{
|
||||
"names", fmt.Sprintf("%q, %q", existing.Name, id.Name),
|
||||
"descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description),
|
||||
"kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind),
|
||||
"units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit),
|
||||
"numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number),
|
||||
}
|
||||
|
||||
// The specification recommends logging a suggested view to resolve
|
||||
// conflicts if possible.
|
||||
//
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration
|
||||
if id.Unit != existing.Unit || id.Number != existing.Number {
|
||||
// There is no view resolution for these, don't make a suggestion.
|
||||
global.Warn(msg, args...)
|
||||
return
|
||||
}
|
||||
|
||||
var stream string
|
||||
if id.Name != existing.Name || id.Kind != existing.Kind {
|
||||
stream = `Stream{Name: "{{NEW_NAME}}"}`
|
||||
} else if id.Description != existing.Description {
|
||||
stream = fmt.Sprintf("Stream{Description: %q}", existing.Description)
|
||||
}
|
||||
|
||||
inst := fmt.Sprintf(
|
||||
"Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}",
|
||||
id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit,
|
||||
)
|
||||
args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream))
|
||||
|
||||
global.Warn(msg, args...)
|
||||
}
|
||||
|
||||
func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID {
|
||||
var zero N
|
||||
return instID{
|
||||
Name: stream.Name,
|
||||
Description: stream.Description,
|
||||
Unit: stream.Unit,
|
||||
Kind: kind,
|
||||
Number: fmt.Sprintf("%T", zero),
|
||||
}
|
||||
}
|
||||
|
||||
// aggregateFunc returns new aggregate functions matching agg, kind, and
|
||||
// monotonic. If the agg is unknown or temporality is invalid, an error is
|
||||
// returned.
|
||||
func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) {
|
||||
switch a := agg.(type) {
|
||||
case AggregationDefault:
|
||||
return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind)
|
||||
case AggregationDrop:
|
||||
// Return nil in and out to signify the drop aggregator.
|
||||
case AggregationLastValue:
|
||||
meas, comp = b.LastValue()
|
||||
case AggregationSum:
|
||||
switch kind {
|
||||
case InstrumentKindObservableCounter:
|
||||
meas, comp = b.PrecomputedSum(true)
|
||||
case InstrumentKindObservableUpDownCounter:
|
||||
meas, comp = b.PrecomputedSum(false)
|
||||
case InstrumentKindCounter, InstrumentKindHistogram:
|
||||
meas, comp = b.Sum(true)
|
||||
default:
|
||||
// InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and
|
||||
// instrumentKindUndefined or other invalid instrument kinds.
|
||||
meas, comp = b.Sum(false)
|
||||
}
|
||||
case AggregationExplicitBucketHistogram:
|
||||
var noSum bool
|
||||
switch kind {
|
||||
case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
|
||||
// The sum should not be collected for any instrument that can make
|
||||
// negative measurements:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
|
||||
noSum = true
|
||||
}
|
||||
meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum)
|
||||
case AggregationBase2ExponentialHistogram:
|
||||
var noSum bool
|
||||
switch kind {
|
||||
case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge:
|
||||
// The sum should not be collected for any instrument that can make
|
||||
// negative measurements:
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations
|
||||
noSum = true
|
||||
}
|
||||
meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum)
|
||||
|
||||
default:
|
||||
err = errUnknownAggregation
|
||||
}
|
||||
|
||||
return meas, comp, err
|
||||
}
|
||||
|
||||
// isAggregatorCompatible checks if the aggregation can be used by the instrument.
|
||||
// Current compatibility:
|
||||
//
|
||||
// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram |
|
||||
// |--------------------------|------|-----------|-----|-----------|-----------------------|
|
||||
// | Counter | ✓ | | ✓ | ✓ | ✓ |
|
||||
// | UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
|
||||
// | Histogram | ✓ | | ✓ | ✓ | ✓ |
|
||||
// | Observable Counter | ✓ | | ✓ | ✓ | ✓ |
|
||||
// | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ |
|
||||
// | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |.
|
||||
func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error {
|
||||
switch agg.(type) {
|
||||
case AggregationDefault:
|
||||
return nil
|
||||
case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram:
|
||||
switch kind {
|
||||
case InstrumentKindCounter,
|
||||
InstrumentKindUpDownCounter,
|
||||
InstrumentKindHistogram,
|
||||
InstrumentKindObservableCounter,
|
||||
InstrumentKindObservableUpDownCounter,
|
||||
InstrumentKindObservableGauge:
|
||||
return nil
|
||||
default:
|
||||
return errIncompatibleAggregation
|
||||
}
|
||||
case AggregationSum:
|
||||
switch kind {
|
||||
case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter:
|
||||
return nil
|
||||
default:
|
||||
// TODO: review need for aggregation check after
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
|
||||
return errIncompatibleAggregation
|
||||
}
|
||||
case AggregationLastValue:
|
||||
if kind == InstrumentKindObservableGauge {
|
||||
return nil
|
||||
}
|
||||
// TODO: review need for aggregation check after
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/issues/2710
|
||||
return errIncompatibleAggregation
|
||||
case AggregationDrop:
|
||||
return nil
|
||||
default:
|
||||
// This is reached after checking for the default aggregation; it should be an error at this point.
|
||||
return fmt.Errorf("%w: %v", errUnknownAggregation, agg)
|
||||
}
|
||||
}
|
||||
|
||||
// pipelines is the group of pipelines connecting Readers with instrument
|
||||
// measurement.
|
||||
type pipelines []*pipeline
|
||||
|
||||
func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines {
|
||||
pipes := make([]*pipeline, 0, len(readers))
|
||||
for _, r := range readers {
|
||||
p := newPipeline(res, r, views)
|
||||
r.register(p)
|
||||
pipes = append(pipes, p)
|
||||
}
|
||||
return pipes
|
||||
}
|
||||
|
||||
func (p pipelines) registerCallback(cback func(context.Context) error) {
|
||||
for _, pipe := range p {
|
||||
pipe.addCallback(cback)
|
||||
}
|
||||
}
|
||||
|
||||
func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration {
|
||||
unregs := make([]func(), len(p))
|
||||
for i, pipe := range p {
|
||||
unregs[i] = pipe.addMultiCallback(c)
|
||||
}
|
||||
return unregisterFuncs{f: unregs}
|
||||
}
|
||||
|
||||
type unregisterFuncs struct {
|
||||
embedded.Registration
|
||||
f []func()
|
||||
}
|
||||
|
||||
func (u unregisterFuncs) Unregister() error {
|
||||
for _, f := range u.f {
|
||||
f()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolver facilitates resolving aggregate functions an instrument calls to
|
||||
// aggregate measurements with while updating all pipelines that need to pull
|
||||
// from those aggregations.
|
||||
type resolver[N int64 | float64] struct {
|
||||
inserters []*inserter[N]
|
||||
}
|
||||
|
||||
func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] {
|
||||
in := make([]*inserter[N], len(p))
|
||||
for i := range in {
|
||||
in[i] = newInserter[N](p[i], vc)
|
||||
}
|
||||
return resolver[N]{in}
|
||||
}
|
||||
|
||||
// Aggregators returns the Aggregators that must be updated by the instrument
|
||||
// defined by id.
|
||||
func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) {
|
||||
var measures []aggregate.Measure[N]
|
||||
|
||||
errs := &multierror{}
|
||||
for _, i := range r.inserters {
|
||||
in, err := i.Instrument(id)
|
||||
if err != nil {
|
||||
errs.append(err)
|
||||
}
|
||||
measures = append(measures, in...)
|
||||
}
|
||||
return measures, errs.errorOrNil()
|
||||
}
|
||||
|
||||
type multierror struct {
|
||||
wrapped error
|
||||
errors []string
|
||||
}
|
||||
|
||||
func (m *multierror) errorOrNil() error {
|
||||
if len(m.errors) == 0 {
|
||||
return nil
|
||||
}
|
||||
if m.wrapped == nil {
|
||||
return errors.New(strings.Join(m.errors, "; "))
|
||||
}
|
||||
return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; "))
|
||||
}
|
||||
|
||||
func (m *multierror) append(err error) {
|
||||
m.errors = append(m.errors, err.Error())
|
||||
}
|
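As a sketch (not part of the vendored file), a View like the following is what the inserter above matches instruments against; the instrument name and boundaries are hypothetical. It would be passed to NewMeterProvider via sdkmetric.WithView.

package example

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

// histogramView rewrites the bucket boundaries for a hypothetical
// "request.duration" histogram instrument.
var histogramView = sdkmetric.NewView(
	sdkmetric.Instrument{Name: "request.duration", Kind: sdkmetric.InstrumentKindHistogram},
	sdkmetric.Stream{
		Aggregation: sdkmetric.AggregationExplicitBucketHistogram{
			Boundaries: []float64{0.01, 0.1, 1, 10},
		},
	},
)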
154
vendor/go.opentelemetry.io/otel/sdk/metric/provider.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/embedded"
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
)
|
||||
|
||||
// MeterProvider handles the creation and coordination of Meters. All Meters
|
||||
// created by a MeterProvider will be associated with the same Resource, have
|
||||
// the same Views applied to them, and have their produced metric telemetry
|
||||
// passed to the configured Readers.
|
||||
type MeterProvider struct {
|
||||
embedded.MeterProvider
|
||||
|
||||
pipes pipelines
|
||||
meters cache[instrumentation.Scope, *meter]
|
||||
|
||||
forceFlush, shutdown func(context.Context) error
|
||||
stopped atomic.Bool
|
||||
}
|
||||
|
||||
// Compile-time check MeterProvider implements metric.MeterProvider.
|
||||
var _ metric.MeterProvider = (*MeterProvider)(nil)
|
||||
|
||||
// NewMeterProvider returns a new and configured MeterProvider.
|
||||
//
|
||||
// By default, the returned MeterProvider is configured with the default
|
||||
// Resource and no Readers. Readers cannot be added after a MeterProvider is
|
||||
// created. This means the returned MeterProvider, one created with no
|
||||
// Readers, will perform no operations.
|
||||
func NewMeterProvider(options ...Option) *MeterProvider {
|
||||
conf := newConfig(options)
|
||||
flush, sdown := conf.readerSignals()
|
||||
|
||||
mp := &MeterProvider{
|
||||
pipes: newPipelines(conf.res, conf.readers, conf.views),
|
||||
forceFlush: flush,
|
||||
shutdown: sdown,
|
||||
}
|
||||
// Log after creation so all readers show correctly they are registered.
|
||||
global.Info("MeterProvider created",
|
||||
"Resource", conf.res,
|
||||
"Readers", conf.readers,
|
||||
"Views", len(conf.views),
|
||||
)
|
||||
return mp
|
||||
}
|
||||
|
||||
// Meter returns a Meter with the given name and configured with options.
|
||||
//
|
||||
// The name should be the name of the instrumentation scope creating
|
||||
// telemetry. This name may be the same as the instrumented code only if that
|
||||
// code provides built-in instrumentation.
|
||||
//
|
||||
// Calls to the Meter method after Shutdown has been called will return Meters
|
||||
// that perform no operations.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
|
||||
if name == "" {
|
||||
global.Warn("Invalid Meter name.", "name", name)
|
||||
}
|
||||
|
||||
if mp.stopped.Load() {
|
||||
return noop.Meter{}
|
||||
}
|
||||
|
||||
c := metric.NewMeterConfig(options...)
|
||||
s := instrumentation.Scope{
|
||||
Name: name,
|
||||
Version: c.InstrumentationVersion(),
|
||||
SchemaURL: c.SchemaURL(),
|
||||
}
|
||||
|
||||
global.Info("Meter created",
|
||||
"Name", s.Name,
|
||||
"Version", s.Version,
|
||||
"SchemaURL", s.SchemaURL,
|
||||
)
|
||||
|
||||
return mp.meters.Lookup(s, func() *meter {
|
||||
return newMeter(s, mp.pipes)
|
||||
})
|
||||
}
|
||||
|
||||
// ForceFlush flushes all pending telemetry.
|
||||
//
|
||||
// This method honors the deadline or cancellation of ctx. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// ForceFlush calls ForceFlush(context.Context) error
|
||||
// on all Readers that implements this method.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
|
||||
if mp.forceFlush != nil {
|
||||
return mp.forceFlush(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown shuts down the MeterProvider flushing all pending telemetry and
|
||||
// releasing any held computational resources.
|
||||
//
|
||||
// This call is idempotent. The first call will perform all flush and
|
||||
// releasing operations. Subsequent calls will perform no action and will
|
||||
// return an error stating this.
|
||||
//
|
||||
// Measurements made by instruments from meters this MeterProvider created
|
||||
// will not be exported after Shutdown is called.
|
||||
//
|
||||
// This method honors the deadline or cancellation of ctx. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
func (mp *MeterProvider) Shutdown(ctx context.Context) error {
|
||||
// Even though it may seem like there is a synchronization issue between the
|
||||
// call to `Store` and checking `shutdown`, the Go concurrency model ensures
|
||||
// that is not the case, as all the atomic operations executed in a program
|
||||
// behave as though executed in some sequentially consistent order. This
|
||||
// definition provides the same semantics as C++'s sequentially consistent
|
||||
// atomics and Java's volatile variables.
|
||||
// See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic.
|
||||
|
||||
mp.stopped.Store(true)
|
||||
if mp.shutdown != nil {
|
||||
return mp.shutdown(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
200
vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
generated
vendored
Normal file
200
vendor/go.opentelemetry.io/otel/sdk/metric/reader.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"go.opentelemetry.io/otel/sdk/metric/metricdata"
|
||||
)
|
||||
|
||||
// errDuplicateRegister is logged by a Reader when an attempt to registered it
|
||||
// more than once occurs.
|
||||
var errDuplicateRegister = fmt.Errorf("duplicate reader registration")
|
||||
|
||||
// ErrReaderNotRegistered is returned if Collect or Shutdown are called before
|
||||
// the reader is registered with a MeterProvider.
|
||||
var ErrReaderNotRegistered = fmt.Errorf("reader is not registered")
|
||||
|
||||
// ErrReaderShutdown is returned if Collect or Shutdown are called after a
|
||||
// reader has been Shutdown once.
|
||||
var ErrReaderShutdown = fmt.Errorf("reader is shutdown")
|
||||
|
||||
// errNonPositiveDuration is logged when an environmental variable
|
||||
// has non-positive value.
|
||||
var errNonPositiveDuration = fmt.Errorf("non-positive duration")
|
||||
|
||||
// Reader is the interface used between the SDK and an
|
||||
// exporter. Control flow is bi-directional through the
|
||||
// Reader, since the SDK initiates ForceFlush and Shutdown
|
||||
// while the exporter initiates collection. The Register() method here
|
||||
// informs the Reader that it can begin reading, signaling the
|
||||
// start of bi-directional control flow.
|
||||
//
|
||||
// Typically, push-based exporters that are periodic will
|
||||
// implement PeroidicExporter themselves and construct a
|
||||
// PeriodicReader to satisfy this interface.
|
||||
//
|
||||
// Pull-based exporters will typically implement Register
|
||||
// themselves, since they read on demand.
|
||||
//
|
||||
// Warning: methods may be added to this interface in minor releases.
|
||||
type Reader interface {
|
||||
// register registers a Reader with a MeterProvider.
|
||||
// The producer argument allows the Reader to signal the sdk to collect
|
||||
// and send aggregated metric measurements.
|
||||
register(sdkProducer)
|
||||
|
||||
// temporality reports the Temporality for the instrument kind provided.
|
||||
//
|
||||
// This method needs to be concurrent safe with itself and all the other
|
||||
// Reader methods.
|
||||
temporality(InstrumentKind) metricdata.Temporality
|
||||
|
||||
// aggregation returns what Aggregation to use for an instrument kind.
|
||||
//
|
||||
// This method needs to be concurrent safe with itself and all the other
|
||||
// Reader methods.
|
||||
aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type.
|
||||
|
||||
// Collect gathers and returns all metric data related to the Reader from
|
||||
// the SDK and stores it in out. An error is returned if this is called
|
||||
// after Shutdown or if out is nil.
|
||||
//
|
||||
// This method needs to be concurrent safe, and the cancellation of the
|
||||
// passed context is expected to be honored.
|
||||
Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// Shutdown flushes all metric measurements held in an export pipeline and releases any
|
||||
// held computational resources.
|
||||
//
|
||||
// This deadline or cancellation of the passed context are honored. An appropriate
|
||||
// error will be returned in these situations. There is no guaranteed that all
|
||||
// telemetry be flushed or all resources have been released in these
|
||||
// situations.
|
||||
//
|
||||
// After Shutdown is called, calls to Collect will perform no operation and instead will return
|
||||
// an error indicating the shutdown state.
|
||||
//
|
||||
// This method needs to be concurrent safe.
|
||||
Shutdown(context.Context) error
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// sdkProducer produces metrics for a Reader.
|
||||
type sdkProducer interface {
|
||||
// produce returns aggregated metrics from a single collection.
|
||||
//
|
||||
// This method is safe to call concurrently.
|
||||
produce(context.Context, *metricdata.ResourceMetrics) error
|
||||
}
|
||||
|
||||
// Producer produces metrics for a Reader from an external source.
|
||||
type Producer interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// Produce returns aggregated metrics from an external source.
|
||||
//
|
||||
// This method should be safe to call concurrently.
|
||||
Produce(context.Context) ([]metricdata.ScopeMetrics, error)
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// produceHolder is used as an atomic.Value to wrap the non-concrete producer
|
||||
// type.
|
||||
type produceHolder struct {
|
||||
produce func(context.Context, *metricdata.ResourceMetrics) error
|
||||
}
|
||||
|
||||
// shutdownProducer produces an ErrReaderShutdown error always.
|
||||
type shutdownProducer struct{}
|
||||
|
||||
// produce returns an ErrReaderShutdown error.
|
||||
func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error {
|
||||
return ErrReaderShutdown
|
||||
}
|
||||
|
||||
// TemporalitySelector selects the temporality to use based on the InstrumentKind.
|
||||
type TemporalitySelector func(InstrumentKind) metricdata.Temporality
|
||||
|
||||
// DefaultTemporalitySelector is the default TemporalitySelector used if
|
||||
// WithTemporalitySelector is not provided. CumulativeTemporality will be used
|
||||
// for all instrument kinds if this TemporalitySelector is used.
|
||||
func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality {
|
||||
return metricdata.CumulativeTemporality
|
||||
}
|
||||
|
||||
// AggregationSelector selects the aggregation and the parameters to use for
|
||||
// that aggregation based on the InstrumentKind.
|
||||
//
|
||||
// If the Aggregation returned is nil or DefaultAggregation, the selection from
|
||||
// DefaultAggregationSelector will be used.
|
||||
type AggregationSelector func(InstrumentKind) Aggregation
|
||||
|
||||
// DefaultAggregationSelector returns the default aggregation and parameters
|
||||
// that will be used to summarize measurement made from an instrument of
|
||||
// InstrumentKind. This AggregationSelector using the following selection
|
||||
// mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum,
|
||||
// Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue,
|
||||
// Histogram ⇨ ExplicitBucketHistogram.
|
||||
func DefaultAggregationSelector(ik InstrumentKind) Aggregation {
|
||||
switch ik {
|
||||
case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter:
|
||||
return AggregationSum{}
|
||||
case InstrumentKindObservableGauge:
|
||||
return AggregationLastValue{}
|
||||
case InstrumentKindHistogram:
|
||||
return AggregationExplicitBucketHistogram{
|
||||
Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000},
|
||||
NoMinMax: false,
|
||||
}
|
||||
}
|
||||
panic("unknown instrument kind")
|
||||
}
|
||||
|
||||
// ReaderOption is an option which can be applied to manual or Periodic
|
||||
// readers.
|
||||
type ReaderOption interface {
|
||||
PeriodicReaderOption
|
||||
ManualReaderOption
|
||||
}
|
||||
|
||||
// WithProducers registers producers as an external Producer of metric data
|
||||
// for this Reader.
|
||||
func WithProducer(p Producer) ReaderOption {
|
||||
return producerOption{p: p}
|
||||
}
|
||||
|
||||
type producerOption struct {
|
||||
p Producer
|
||||
}
|
||||
|
||||
// applyManual returns a manualReaderConfig with option applied.
|
||||
func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig {
|
||||
c.producers = append(c.producers, o.p)
|
||||
return c
|
||||
}
|
||||
|
||||
// applyPeriodic returns a periodicReaderConfig with option applied.
|
||||
func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig {
|
||||
c.producers = append(c.producers, o.p)
|
||||
return c
|
||||
}
|
20
vendor/go.opentelemetry.io/otel/sdk/metric/version.go
generated
vendored
Normal file
20
vendor/go.opentelemetry.io/otel/sdk/metric/version.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
// version is the current release version of the metric SDK in use.
|
||||
func version() string {
|
||||
return "1.19.0"
|
||||
}
|
128
vendor/go.opentelemetry.io/otel/sdk/metric/view.go
generated
vendored
Normal file
128
vendor/go.opentelemetry.io/otel/sdk/metric/view.go
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric // import "go.opentelemetry.io/otel/sdk/metric"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/internal/global"
|
||||
)
|
||||
|
||||
var (
|
||||
errMultiInst = errors.New("name replacement for multiple instruments")
|
||||
errEmptyView = errors.New("no criteria provided for view")
|
||||
|
||||
emptyView = func(Instrument) (Stream, bool) { return Stream{}, false }
|
||||
)
|
||||
|
||||
// View is an override to the default behavior of the SDK. It defines how data
|
||||
// should be collected for certain instruments. It returns true and the exact
|
||||
// Stream to use for matching Instruments. Otherwise, if the view does not
|
||||
// match, false is returned.
|
||||
type View func(Instrument) (Stream, bool)
|
||||
|
||||
// NewView returns a View that applies the Stream mask for all instruments that
|
||||
// match criteria. The returned View will only apply mask if all non-zero-value
|
||||
// fields of criteria match the corresponding Instrument passed to the view. If
|
||||
// no criteria are provided, all field of criteria are their zero-values, a
|
||||
// view that matches no instruments is returned. If you need to match a
|
||||
// zero-value field, create a View directly.
|
||||
//
|
||||
// The Name field of criteria supports wildcard pattern matching. The "*"
|
||||
// wildcard is recognized as matching zero or more characters, and "?" is
|
||||
// recognized as matching exactly one character. For example, a pattern of "*"
|
||||
// matches all instrument names.
|
||||
//
|
||||
// The Stream mask only applies updates for non-zero-value fields. By default,
|
||||
// the Instrument the View matches against will be use for the Name,
|
||||
// Description, and Unit of the returned Stream and no Aggregation or
|
||||
// AttributeFilter are set. All non-zero-value fields of mask are used instead
|
||||
// of the default. If you need to zero out an Stream field returned from a
|
||||
// View, create a View directly.
|
||||
func NewView(criteria Instrument, mask Stream) View {
|
||||
if criteria.empty() {
|
||||
global.Error(
|
||||
errEmptyView, "dropping view",
|
||||
"mask", mask,
|
||||
)
|
||||
return emptyView
|
||||
}
|
||||
|
||||
var matchFunc func(Instrument) bool
|
||||
if strings.ContainsAny(criteria.Name, "*?") {
|
||||
if mask.Name != "" {
|
||||
global.Error(
|
||||
errMultiInst, "dropping view",
|
||||
"criteria", criteria,
|
||||
"mask", mask,
|
||||
)
|
||||
return emptyView
|
||||
}
|
||||
|
||||
// Handle branching here in NewView instead of criteria.matches so
|
||||
// criteria.matches remains inlinable for the simple case.
|
||||
pattern := regexp.QuoteMeta(criteria.Name)
|
||||
pattern = "^" + pattern + "$"
|
||||
pattern = strings.ReplaceAll(pattern, `\?`, ".")
|
||||
pattern = strings.ReplaceAll(pattern, `\*`, ".*")
|
||||
re := regexp.MustCompile(pattern)
|
||||
matchFunc = func(i Instrument) bool {
|
||||
return re.MatchString(i.Name) &&
|
||||
criteria.matchesDescription(i) &&
|
||||
criteria.matchesKind(i) &&
|
||||
criteria.matchesUnit(i) &&
|
||||
criteria.matchesScope(i)
|
||||
}
|
||||
} else {
|
||||
matchFunc = criteria.matches
|
||||
}
|
||||
|
||||
var agg Aggregation
|
||||
if mask.Aggregation != nil {
|
||||
agg = mask.Aggregation.copy()
|
||||
if err := agg.err(); err != nil {
|
||||
global.Error(
|
||||
err, "not using aggregation with view",
|
||||
"criteria", criteria,
|
||||
"mask", mask,
|
||||
)
|
||||
agg = nil
|
||||
}
|
||||
}
|
||||
|
||||
return func(i Instrument) (Stream, bool) {
|
||||
if matchFunc(i) {
|
||||
return Stream{
|
||||
Name: nonZero(mask.Name, i.Name),
|
||||
Description: nonZero(mask.Description, i.Description),
|
||||
Unit: nonZero(mask.Unit, i.Unit),
|
||||
Aggregation: agg,
|
||||
AttributeFilter: mask.AttributeFilter,
|
||||
}, true
|
||||
}
|
||||
return Stream{}, false
|
||||
}
|
||||
}
|
||||
|
||||
// nonZero returns v if it is non-zero-valued, otherwise alt.
|
||||
func nonZero[T comparable](v, alt T) T {
|
||||
var zero T
|
||||
if v != zero {
|
||||
return v
|
||||
}
|
||||
return alt
|
||||
}
|
371
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
generated
vendored
Normal file
371
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.go
generated
vendored
Normal file
@@ -0,0 +1,371 @@
|
||||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.6
|
||||
// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "go.opentelemetry.io/proto/otlp/metrics/v1"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ExportMetricsServiceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// An array of ResourceMetrics.
|
||||
// For data coming from a single resource this array will typically contain one
|
||||
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
|
||||
// data from multiple origins typically batch the data before forwarding further and
|
||||
// in that case this array will contain multiple elements.
|
||||
ResourceMetrics []*v1.ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceRequest) Reset() {
|
||||
*x = ExportMetricsServiceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExportMetricsServiceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExportMetricsServiceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExportMetricsServiceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceRequest) GetResourceMetrics() []*v1.ResourceMetrics {
|
||||
if x != nil {
|
||||
return x.ResourceMetrics
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExportMetricsServiceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The details of a partially successful export request.
|
||||
//
|
||||
// If the request is only partially accepted
|
||||
// (i.e. when the server accepts only parts of the data and rejects the rest)
|
||||
// the server MUST initialize the `partial_success` field and MUST
|
||||
// set the `rejected_<signal>` with the number of items it rejected.
|
||||
//
|
||||
// Servers MAY also make use of the `partial_success` field to convey
|
||||
// warnings/suggestions to senders even when the request was fully accepted.
|
||||
// In such cases, the `rejected_<signal>` MUST have a value of `0` and
|
||||
// the `error_message` MUST be non-empty.
|
||||
//
|
||||
// A `partial_success` message with an empty value (rejected_<signal> = 0 and
|
||||
// `error_message` = "") is equivalent to it not being set/present. Senders
|
||||
// SHOULD interpret it the same way as in the full success case.
|
||||
PartialSuccess *ExportMetricsPartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceResponse) Reset() {
|
||||
*x = ExportMetricsServiceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExportMetricsServiceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExportMetricsServiceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExportMetricsServiceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsServiceResponse) GetPartialSuccess() *ExportMetricsPartialSuccess {
|
||||
if x != nil {
|
||||
return x.PartialSuccess
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExportMetricsPartialSuccess struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The number of rejected data points.
|
||||
//
|
||||
// A `rejected_<signal>` field holding a `0` value indicates that the
|
||||
// request was fully accepted.
|
||||
RejectedDataPoints int64 `protobuf:"varint,1,opt,name=rejected_data_points,json=rejectedDataPoints,proto3" json:"rejected_data_points,omitempty"`
|
||||
// A developer-facing human-readable message in English. It should be used
|
||||
// either to explain why the server rejected parts of the data during a partial
|
||||
// success or to convey warnings/suggestions during a full success. The message
|
||||
// should offer guidance on how users can address such issues.
|
||||
//
|
||||
// error_message is an optional field. An error_message with an empty value
|
||||
// is equivalent to it not being set.
|
||||
ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExportMetricsPartialSuccess) Reset() {
|
||||
*x = ExportMetricsPartialSuccess{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsPartialSuccess) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExportMetricsPartialSuccess) ProtoMessage() {}
|
||||
|
||||
func (x *ExportMetricsPartialSuccess) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExportMetricsPartialSuccess.ProtoReflect.Descriptor instead.
|
||||
func (*ExportMetricsPartialSuccess) Descriptor() ([]byte, []int) {
|
||||
return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ExportMetricsPartialSuccess) GetRejectedDataPoints() int64 {
|
||||
if x != nil {
|
||||
return x.RejectedDataPoints
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ExportMetricsPartialSuccess) GetErrorMessage() string {
|
||||
if x != nil {
|
||||
return x.ErrorMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = []byte{
|
||||
0x0a, 0x3e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
|
||||
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
|
||||
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x2c, 0x6f, 0x70, 0x65, 0x6e,
|
||||
0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f,
|
||||
0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75,
|
||||
0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
|
||||
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x72,
|
||||
0x69, 0x63, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
|
||||
0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6e, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f,
|
||||
0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e,
|
||||
0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65,
|
||||
0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d,
|
||||
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
|
||||
0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63,
|
||||
0x63, 0x65, 0x73, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65,
|
||||
0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63,
|
||||
0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f,
|
||||
0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x03, 0x52, 0x12, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x50,
|
||||
0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72,
|
||||
0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x32, 0xac, 0x01, 0x0a, 0x0e, 0x4d,
|
||||
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x99, 0x01,
|
||||
0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74,
|
||||
0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63,
|
||||
0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
|
||||
0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x46, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
|
||||
0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72,
|
||||
0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0xa4, 0x01, 0x0a, 0x2b, 0x69, 0x6f,
|
||||
0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x6d,
|
||||
0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x4d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
|
||||
0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70,
|
||||
0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69,
|
||||
0x63, 0x73, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x28, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65,
|
||||
0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
|
||||
0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x56, 0x31,
|
||||
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce sync.Once
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescGZIP() []byte {
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescOnce.Do(func() {
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData)
|
||||
})
|
||||
return file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = []interface{}{
|
||||
(*ExportMetricsServiceRequest)(nil), // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
|
||||
(*ExportMetricsServiceResponse)(nil), // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
|
||||
(*ExportMetricsPartialSuccess)(nil), // 2: opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
|
||||
(*v1.ResourceMetrics)(nil), // 3: opentelemetry.proto.metrics.v1.ResourceMetrics
|
||||
}
|
||||
var file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = []int32{
|
||||
3, // 0: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest.resource_metrics:type_name -> opentelemetry.proto.metrics.v1.ResourceMetrics
|
||||
2, // 1: opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.metrics.v1.ExportMetricsPartialSuccess
|
||||
0, // 2: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:input_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest
|
||||
1, // 3: opentelemetry.proto.collector.metrics.v1.MetricsService.Export:output_type -> opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse
|
||||
3, // [3:4] is the sub-list for method output_type
|
||||
2, // [2:3] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() }
|
||||
func file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_init() {
|
||||
if File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExportMetricsServiceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExportMetricsServiceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExportMetricsPartialSuccess); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes,
|
||||
DependencyIndexes: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs,
|
||||
MessageInfos: file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_msgTypes,
|
||||
}.Build()
|
||||
File_opentelemetry_proto_collector_metrics_v1_metrics_service_proto = out.File
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_rawDesc = nil
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_goTypes = nil
|
||||
file_opentelemetry_proto_collector_metrics_v1_metrics_service_proto_depIdxs = nil
|
||||
}
|
171
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
generated
vendored
Normal file
171
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service.pb.gw.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
|
||||
|
||||
/*
|
||||
Package v1 is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq ExportMetricsServiceRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server MetricsServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq ExportMetricsServiceRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Export(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterMetricsServiceHandlerServer registers the http handlers for service MetricsService to "mux".
|
||||
// UnaryRPC :call MetricsServiceServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMetricsServiceHandlerFromEndpoint instead.
|
||||
func RegisterMetricsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server MetricsServiceServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_MetricsService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.Dial(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterMetricsServiceHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn))
|
||||
}
|
||||
|
||||
// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "MetricsServiceClient" to call the correct interceptors.
|
||||
func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error {
|
||||
|
||||
mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", runtime.WithHTTPPathPattern("/v1/metrics"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_MetricsService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_MetricsService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_MetricsService_Export_0 = runtime.ForwardResponseMessage
|
||||
)
|
109
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
generated
vendored
Normal file
109
vendor/go.opentelemetry.io/proto/otlp/collector/metrics/v1/metrics_service_grpc.pb.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.1.0
|
||||
// - protoc v3.21.6
|
||||
// source: opentelemetry/proto/collector/metrics/v1/metrics_service.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// MetricsServiceClient is the client API for MetricsService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type MetricsServiceClient interface {
|
||||
// For performance reasons, it is recommended to keep this RPC
|
||||
// alive for the entire life of the application.
|
||||
Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error)
|
||||
}
|
||||
|
||||
type metricsServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewMetricsServiceClient(cc grpc.ClientConnInterface) MetricsServiceClient {
|
||||
return &metricsServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *metricsServiceClient) Export(ctx context.Context, in *ExportMetricsServiceRequest, opts ...grpc.CallOption) (*ExportMetricsServiceResponse, error) {
|
||||
out := new(ExportMetricsServiceResponse)
|
||||
err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// MetricsServiceServer is the server API for MetricsService service.
|
||||
// All implementations must embed UnimplementedMetricsServiceServer
|
||||
// for forward compatibility
|
||||
type MetricsServiceServer interface {
|
||||
// For performance reasons, it is recommended to keep this RPC
|
||||
// alive for the entire life of the application.
|
||||
Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error)
|
||||
mustEmbedUnimplementedMetricsServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedMetricsServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedMetricsServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedMetricsServiceServer) Export(context.Context, *ExportMetricsServiceRequest) (*ExportMetricsServiceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
|
||||
}
|
||||
func (UnimplementedMetricsServiceServer) mustEmbedUnimplementedMetricsServiceServer() {}
|
||||
|
||||
// UnsafeMetricsServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to MetricsServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeMetricsServiceServer interface {
|
||||
mustEmbedUnimplementedMetricsServiceServer()
|
||||
}
|
||||
|
||||
func RegisterMetricsServiceServer(s grpc.ServiceRegistrar, srv MetricsServiceServer) {
|
||||
s.RegisterService(&MetricsService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _MetricsService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExportMetricsServiceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(MetricsServiceServer).Export(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/opentelemetry.proto.collector.metrics.v1.MetricsService/Export",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(MetricsServiceServer).Export(ctx, req.(*ExportMetricsServiceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// MetricsService_ServiceDesc is the grpc.ServiceDesc for MetricsService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var MetricsService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "opentelemetry.proto.collector.metrics.v1.MetricsService",
|
||||
HandlerType: (*MetricsServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Export",
|
||||
Handler: _MetricsService_Export_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "opentelemetry/proto/collector/metrics/v1/metrics_service.proto",
|
||||
}
|
2489
vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
generated
vendored
Normal file
2489
vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user